repo (string, lengths 2–99) | file (string, lengths 13–225) | code (string, lengths 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
NSVF | NSVF-main/fairnr/modules/renderer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairnr.modules.module_utils import FCLayer
from fairnr.data.geometry import ray
MAX_DEPTH = 10000.0
RENDERER_REGISTRY = {}
def register_renderer(name):
def register_renderer_cls(cls):
if name in RENDERER_REGISTRY:
raise ValueError('Cannot register duplicate module ({})'.format(name))
RENDERER_REGISTRY[name] = cls
return cls
return register_renderer_cls
def get_renderer(name):
if name not in RENDERER_REGISTRY:
raise ValueError('Cannot find module {}'.format(name))
return RENDERER_REGISTRY[name]
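# Usage sketch: renderers register themselves by name via the decorator and are later
# looked up with get_renderer. For a hypothetical renderer:
#
#   @register_renderer('my_renderer')
#   class MyRenderer(Renderer):
#       ...
#
#   renderer = get_renderer('my_renderer')(args)   # -> MyRenderer instance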
@register_renderer('abstract_renderer')
class Renderer(nn.Module):
"""
Abstract class for ray marching
"""
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, **kwargs):
raise NotImplementedError
@staticmethod
def add_args(parser):
pass
@register_renderer('volume_rendering')
class VolumeRenderer(Renderer):
def __init__(self, args):
super().__init__(args)
self.chunk_size = 1024 * getattr(args, "chunk_size", 64)
self.valid_chunk_size = 1024 * getattr(args, "valid_chunk_size", self.chunk_size // 1024)
self.discrete_reg = getattr(args, "discrete_regularization", False)
self.raymarching_tolerance = getattr(args, "raymarching_tolerance", 0.0)
self.trace_normal = getattr(args, "trace_normal", False)
@staticmethod
def add_args(parser):
# ray-marching parameters
parser.add_argument('--discrete-regularization', action='store_true',
help='if set, a zero mean unit variance gaussian will be added to encourage discreteness')
# additional arguments
parser.add_argument('--chunk-size', type=int, metavar='D',
help='set chunks to go through the network (~K forward passes). trade time for memory. ')
parser.add_argument('--valid-chunk-size', type=int, metavar='D',
help='chunk size used when not training. By default the same as chunk-size.')
parser.add_argument('--raymarching-tolerance', type=float, default=0)
parser.add_argument('--trace-normal', action='store_true')
def forward_once(
self, input_fn, field_fn, ray_start, ray_dir, samples, encoder_states,
early_stop=None, output_types=['sigma', 'texture']
):
"""
chunks: set > 1 if out-of-memory. It trades time for memory.
"""
sampled_depth = samples['sampled_point_depth']
sampled_idx = samples['sampled_point_voxel_idx'].long()
# only compute when the ray hits
sample_mask = sampled_idx.ne(-1)
if early_stop is not None:
sample_mask = sample_mask & (~early_stop.unsqueeze(-1))
if sample_mask.sum() == 0: # the rays miss everything, skip
return None, 0
sampled_xyz = ray(ray_start.unsqueeze(1), ray_dir.unsqueeze(1), sampled_depth.unsqueeze(2))
sampled_dir = ray_dir.unsqueeze(1).expand(*sampled_depth.size(), ray_dir.size()[-1])
samples['sampled_point_xyz'] = sampled_xyz
samples['sampled_point_ray_direction'] = sampled_dir
# apply mask
samples = {name: s[sample_mask] for name, s in samples.items()}
# get encoder features as inputs
field_inputs = input_fn(samples, encoder_states)
# forward implicit fields
field_outputs = field_fn(field_inputs, outputs=output_types)
outputs = {'sample_mask': sample_mask}
def masked_scatter(mask, x):
B, K = mask.size()
if x.dim() == 1:
return x.new_zeros(B, K).masked_scatter(mask, x)
return x.new_zeros(B, K, x.size(-1)).masked_scatter(
mask.unsqueeze(-1).expand(B, K, x.size(-1)), x)
# post processing
if 'sigma' in field_outputs:
sigma, sampled_dists = field_outputs['sigma'], field_inputs['dists']
noise = 0 if not self.discrete_reg and (not self.training) else torch.zeros_like(sigma).normal_()
free_energy = torch.relu(noise + sigma) * sampled_dists
free_energy = free_energy * 7.0 # ? [debug]
# (optional) free_energy = (F.elu(sigma - 3, alpha=1) + 1) * dists
# (optional) free_energy = torch.abs(sigma) * sampled_dists ## ??
outputs['free_energy'] = masked_scatter(sample_mask, free_energy)
if 'sdf' in field_outputs:
outputs['sdf'] = masked_scatter(sample_mask, field_outputs['sdf'])
if 'texture' in field_outputs:
outputs['texture'] = masked_scatter(sample_mask, field_outputs['texture'])
if 'normal' in field_outputs:
outputs['normal'] = masked_scatter(sample_mask, field_outputs['normal'])
if 'feat_n2' in field_outputs:
outputs['feat_n2'] = masked_scatter(sample_mask, field_outputs['feat_n2'])
return outputs, sample_mask.sum()
def forward_chunk(
self, input_fn, field_fn, ray_start, ray_dir, samples, encoder_states,
gt_depths=None, output_types=['sigma', 'texture'], global_weights=None,
):
if self.trace_normal:
output_types += ['normal']
sampled_depth = samples['sampled_point_depth']
sampled_idx = samples['sampled_point_voxel_idx'].long()
original_depth = samples.get('original_point_depth', None)
tolerance = self.raymarching_tolerance
chunk_size = self.chunk_size if self.training else self.valid_chunk_size
early_stop = None
if tolerance > 0:
tolerance = -math.log(tolerance)
hits = sampled_idx.ne(-1).long()
outputs = defaultdict(lambda: [])
size_so_far, start_step = 0, 0
accumulated_free_energy = 0
accumulated_evaluations = 0
for i in range(hits.size(1) + 1):
if ((i == hits.size(1)) or (size_so_far + hits[:, i].sum() > chunk_size)) and (i > start_step):
_outputs, _evals = self.forward_once(
input_fn, field_fn,
ray_start, ray_dir,
{name: s[:, start_step: i]
for name, s in samples.items()},
encoder_states,
early_stop=early_stop,
output_types=output_types)
if _outputs is not None:
accumulated_evaluations += _evals
if 'free_energy' in _outputs:
accumulated_free_energy += _outputs['free_energy'].sum(1)
if tolerance > 0:
early_stop = accumulated_free_energy > tolerance
hits[early_stop] *= 0
for key in _outputs:
outputs[key] += [_outputs[key]]
else:
for key in outputs:
outputs[key] += [outputs[key][-1].new_zeros(
outputs[key][-1].size(0),
sampled_depth[:, start_step: i].size(1),
*outputs[key][-1].size()[2:]
)]
start_step, size_so_far = i, 0
if (i < hits.size(1)):
size_so_far += hits[:, i].sum()
outputs = {key: torch.cat(outputs[key], 1) for key in outputs}
results = {}
if 'free_energy' in outputs:
free_energy = outputs['free_energy']
shifted_free_energy = torch.cat([free_energy.new_zeros(sampled_depth.size(0), 1), free_energy[:, :-1]], dim=-1) # shift one step
a = 1 - torch.exp(-free_energy.float()) # probability that this sample point is not empty
b = torch.exp(-torch.cumsum(shifted_free_energy.float(), dim=-1)) # probability that everything is empty up to now
probs = (a * b).type_as(free_energy) # probability that the ray hits something here
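# i.e. the standard discrete volume-rendering weights
#   w_i = T_i * (1 - exp(-sigma_i * delta_i)),   T_i = exp(-sum_{j<i} sigma_j * delta_j)
# with free_energy playing the role of sigma_i * delta_i; probs sums to at most 1 along
# each ray, and 1 - probs.sum(-1) (the 'missed' mass below) is the probability that the
# ray hits nothing.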
else:
probs = outputs['sample_mask'].type_as(sampled_depth) / sampled_depth.size(-1) # assuming a uniform distribution
if global_weights is not None:
probs = probs * global_weights
depth = (sampled_depth * probs).sum(-1)
missed = 1 - probs.sum(-1)
results.update({
'probs': probs, 'depths': depth,
'max_depths': sampled_depth.masked_fill(hits.eq(0), -1).max(1).values,
'min_depths': sampled_depth.min(1).values,
'missed': missed, 'ae': accumulated_evaluations
})
if original_depth is not None:
results['z'] = (original_depth * probs).sum(-1)
if 'texture' in outputs:
results['colors'] = (outputs['texture'] * probs.unsqueeze(-1)).sum(-2)
if 'normal' in outputs:
results['normal'] = (outputs['normal'] * probs.unsqueeze(-1)).sum(-2)
if not self.trace_normal:
results['eikonal-term'] = (outputs['normal'].norm(p=2, dim=-1) - 1) ** 2
else:
results['eikonal-term'] = torch.log((outputs['normal'] ** 2).sum(-1) + 1e-6)
results['eikonal-term'] = results['eikonal-term'][sampled_idx.ne(-1)]
if 'feat_n2' in outputs:
results['feat_n2'] = (outputs['feat_n2'] * probs).sum(-1)
results['regz-term'] = outputs['feat_n2'][sampled_idx.ne(-1)]
return results
def forward(self, input_fn, field_fn, ray_start, ray_dir, samples, *args, **kwargs):
chunk_size = self.chunk_size if self.training else self.valid_chunk_size
if ray_start.size(0) <= chunk_size:
results = self.forward_chunk(input_fn, field_fn, ray_start, ray_dir, samples, *args, **kwargs)
else:
# the number of rays is larger than the maximum forward passes. pre-chunking...
results = [
self.forward_chunk(input_fn, field_fn,
ray_start[i: i+chunk_size], ray_dir[i: i+chunk_size],
{name: s[i: i+chunk_size] for name, s in samples.items()}, *args, **kwargs)
for i in range(0, ray_start.size(0), chunk_size)
]
results = {name: torch.cat([r[name] for r in results], 0)
if results[0][name].dim() > 0 else sum([r[name] for r in results])
for name in results[0]}
if getattr(input_fn, "track_max_probs", False) and (not self.training):
input_fn.track_voxel_probs(samples['sampled_point_voxel_idx'].long(), results['probs'])
return results
@register_renderer('surface_volume_rendering')
class SurfaceVolumeRenderer(VolumeRenderer):
def forward_chunk(
self, input_fn, field_fn, ray_start, ray_dir, samples, encoder_states,
gt_depths=None, output_types=['sigma', 'texture'], global_weights=None,
):
results = super().forward_chunk(
input_fn, field_fn, ray_start, ray_dir, samples, encoder_states,
output_types=['sigma', 'normal'])
# render at the "intersection"
n_probs = results['probs'].clamp(min=1e-6).masked_fill(samples['sampled_point_voxel_idx'].eq(-1), 0)
n_depth = (samples['sampled_point_depth'] * n_probs).sum(-1, keepdim=True) / n_probs.sum(-1, keepdim=True).clamp(min=1e-6)
n_bound = samples['sampled_point_depth'] + samples['sampled_point_distance'] / 2
n_vidxs = ((n_depth - n_bound) >= 0).sum(-1, keepdim=True)
n_vidxs = samples['sampled_point_voxel_idx'].gather(1, n_vidxs)
new_samples = {
'sampled_point_depth': n_depth,
'sampled_point_distance': torch.ones_like(n_depth) * 1e-3, # dummy distance. not useful.
'sampled_point_voxel_idx': n_vidxs,
}
new_results, _ = self.forward_once(input_fn, field_fn, ray_start, ray_dir, new_samples, encoder_states)
results['colors'] = new_results['texture'].squeeze(1) * (1 - results['missed'][:, None])
results['normal'] = new_results['normal'].squeeze(1)
results['eikonal-term'] = torch.cat([results['eikonal-term'], (results['normal'].norm(p=2, dim=-1) - 1) ** 2], 0)
return results
| 12,718 | 44.102837 | 141 | py |
NSVF | NSVF-main/fairnr/modules/reader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import random, os, glob
from fairnr.data.geometry import get_ray_direction, r6d2mat
torch.autograd.set_detect_anomaly(True)
TINY = 1e-9
READER_REGISTRY = {}
def register_reader(name):
def register_reader_cls(cls):
if name in READER_REGISTRY:
raise ValueError('Cannot register duplicate module ({})'.format(name))
READER_REGISTRY[name] = cls
return cls
return register_reader_cls
def get_reader(name):
if name not in READER_REGISTRY:
raise ValueError('Cannot find module {}'.format(name))
return READER_REGISTRY[name]
@register_reader('abstract_reader')
class Reader(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, **kwargs):
raise NotImplementedError
@staticmethod
def add_args(parser):
pass
@register_reader('image_reader')
class ImageReader(Reader):
"""
basic image reader
"""
def __init__(self, args):
super().__init__(args)
self.num_pixels = args.pixel_per_view
self.no_sampling = getattr(args, "no_sampling_at_reader", False)
self.deltas = None
self.all_data = self.find_data()
if getattr(args, "trainable_extrinsics", False):
self.all_data_idx = {data_img: (s, v)
for s, data in enumerate(self.all_data)
for v, data_img in enumerate(data)}
self.deltas = nn.ParameterList([
nn.Parameter(torch.tensor(
[[1., 0., 0., 0., 1., 0., 0., 0., 0.]]).repeat(len(data), 1))
for data in self.all_data])
def find_data(self):
paths = self.args.data
if os.path.isdir(paths):
self.paths = [paths]
else:
self.paths = [line.strip() for line in open(paths)]
return [sorted(glob.glob("{}/rgb/*".format(p))) for p in self.paths]
@staticmethod
def add_args(parser):
parser.add_argument('--pixel-per-view', type=float, metavar='N',
help='number of pixels sampled for each view')
parser.add_argument("--sampling-on-mask", nargs='?', const=0.9, type=float,
help="this value determined the probability of sampling rays on masks")
parser.add_argument("--sampling-at-center", type=float,
help="only useful for training where we restrict sampling at center of the image")
parser.add_argument("--sampling-on-bbox", action='store_true',
help="sampling points to close to the mask")
parser.add_argument("--sampling-patch-size", type=int,
help="sample pixels based on patches instead of independent pixels")
parser.add_argument("--sampling-skipping-size", type=int,
help="sample pixels if we have skipped pixels")
parser.add_argument("--no-sampling-at-reader", action='store_true',
help="do not perform sampling.")
parser.add_argument("--trainable-extrinsics", action='store_true',
help="if set, we assume extrinsics are trainable. We use 6D representations for rotation")
def forward(self, uv, intrinsics, extrinsics, size, path=None, **kwargs):
S, V = uv.size()[:2]
if (not self.training) or self.no_sampling:
uv = uv.reshape(S, V, 2, -1, 1, 1)
flatten_uv = uv.reshape(S, V, 2, -1)
else:
uv, _ = self.sample_pixels(uv, size, **kwargs)
flatten_uv = uv.reshape(S, V, 2, -1)
# go over all shapes
ray_start, ray_dir = [[] for _ in range(S)], [[] for _ in range(S)]
for s in range(S):
for v in range(V):
ixt = intrinsics[s] if intrinsics.dim() == 3 else intrinsics[s, v]
ext = extrinsics[s, v]
translation, rotation = ext[:3, 3], ext[:3, :3]
if (self.deltas is not None) and (path is not None):
shape_id, view_id = self.all_data_idx[path[s][v]]
delta = self.deltas[shape_id][view_id]
d_t, d_r = delta[6:], r6d2mat(delta[None, :6]).squeeze(0)
rotation = rotation @ d_r
translation = translation + d_t
ext = torch.cat([torch.cat([rotation, translation[:, None]], 1), ext[3:]], 0)
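# delta is a per-view 9-dim correction: the first 6 entries are a 6D rotation
# representation (two 3-vectors that r6d2mat orthonormalizes into a rotation matrix,
# initialized above to the identity) and the last 3 are a translation offset, so the
# stored extrinsics are refined as R <- R @ dR and t <- t + dt.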
ray_start[s] += [translation]
ray_dir[s] += [get_ray_direction(translation, flatten_uv[s, v], ixt, ext, 1)]
ray_start = torch.stack([torch.stack(r) for r in ray_start])
ray_dir = torch.stack([torch.stack(r) for r in ray_dir])
return ray_start.unsqueeze(-2), ray_dir.transpose(2, 3), uv
@torch.no_grad()
def sample_pixels(self, uv, size, alpha=None, mask=None, **kwargs):
H, W = int(size[0,0,0]), int(size[0,0,1])
S, V = uv.size()[:2]
if mask is None:
if alpha is not None:
mask = (alpha > 0)
else:
mask = uv.new_ones(S, V, uv.size(-1)).bool()
mask = mask.float().reshape(S, V, H, W)
if self.args.sampling_at_center < 1.0:
r = (1 - self.args.sampling_at_center) / 2.0
mask0 = mask.new_zeros(S, V, H, W)
mask0[:, :, int(H * r): H - int(H * r), int(W * r): W - int(W * r)] = 1
mask = mask * mask0
if self.args.sampling_on_bbox:
x_has_points = mask.sum(2, keepdim=True) > 0
y_has_points = mask.sum(3, keepdim=True) > 0
mask = (x_has_points & y_has_points).float()
probs = mask / (mask.sum() + 1e-8)
if self.args.sampling_on_mask > 0.0:
probs = self.args.sampling_on_mask * probs + (1 - self.args.sampling_on_mask) * 1.0 / (H * W)
num_pixels = int(self.args.pixel_per_view)
patch_size, skip_size = self.args.sampling_patch_size, self.args.sampling_skipping_size
C = patch_size * skip_size
if C > 1:
probs = probs.reshape(S, V, H // C, C, W // C, C).sum(3).sum(-1)
num_pixels = num_pixels // patch_size // patch_size
flatten_probs = probs.reshape(S, V, -1)
sampled_index = sampling_without_replacement(torch.log(flatten_probs+ TINY), num_pixels)
sampled_masks = torch.zeros_like(flatten_probs).scatter_(-1, sampled_index, 1).reshape(S, V, H // C, W // C)
if C > 1:
sampled_masks = sampled_masks[:, :, :, None, :, None].repeat(
1, 1, 1, patch_size, 1, patch_size).reshape(S, V, H // skip_size, W // skip_size)
if skip_size > 1:
full_datamask = sampled_masks.new_zeros(S, V, skip_size * skip_size, H // skip_size, W // skip_size)
full_index = torch.randint(skip_size*skip_size, (S, V))
for i in range(S):
for j in range(V):
full_datamask[i, j, full_index[i, j]] = sampled_masks[i, j]
sampled_masks = full_datamask.reshape(
S, V, skip_size, skip_size, H // skip_size, W // skip_size).permute(0, 1, 4, 2, 5, 3).reshape(S, V, H, W)
X, Y = uv[:,:,0].reshape(S, V, H, W), uv[:,:,1].reshape(S, V, H, W)
X = X[sampled_masks>0].reshape(S, V, 1, -1, patch_size, patch_size)
Y = Y[sampled_masks>0].reshape(S, V, 1, -1, patch_size, patch_size)
return torch.cat([X, Y], 2), sampled_masks
def sampling_without_replacement(logp, k):
def gumbel_like(u):
return -torch.log(-torch.log(torch.rand_like(u) + TINY) + TINY)
scores = logp + gumbel_like(logp)
return scores.topk(k, dim=-1)[1]
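# Gumbel-top-k trick: adding i.i.d. Gumbel(0, 1) noise to log-probabilities and taking
# the top-k indices draws k samples without replacement from the categorical
# distribution defined by those probabilities. Minimal sketch with hypothetical values:
#
#   probs = torch.tensor([[0.7, 0.2, 0.1]])                      # (num_views, num_pixels)
#   idx = sampling_without_replacement(torch.log(probs + TINY), 2)
#   # idx has shape (1, 2) and holds two distinct pixel indices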
| 7,959 | 42.736264 | 125 | py |
NSVF | NSVF-main/fairnr/modules/field.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad
from collections import OrderedDict
from fairnr.modules.implicit import (
ImplicitField, SignedDistanceField,
TextureField, HyperImplicitField, BackgroundField
)
from fairnr.modules.module_utils import NeRFPosEmbLinear
FIELD_REGISTRY = {}
def register_field(name):
def register_field_cls(cls):
if name in FIELD_REGISTRY:
raise ValueError('Cannot register duplicate module ({})'.format(name))
FIELD_REGISTRY[name] = cls
return cls
return register_field_cls
def get_field(name):
if name not in FIELD_REGISTRY:
raise ValueError('Cannot find module {}'.format(name))
return FIELD_REGISTRY[name]
@register_field('abstract_field')
class Field(nn.Module):
"""
Abstract class for implicit functions
"""
def __init__(self, args):
super().__init__()
self.args = args
self.updates = -1
def forward(self, **kwargs):
raise NotImplementedError
@staticmethod
def add_args(parser):
pass
def set_num_updates(self, num_updates):
self.updates = num_updates
@register_field('radiance_field')
class RaidanceField(Field):
def __init__(self, args):
super().__init__(args)
# additional arguments
self.chunk_size = getattr(args, "chunk_size", 256) * 256
self.deterministic_step = getattr(args, "deterministic_step", False)
# background field
self.min_color = getattr(args, "min_color", -1)
self.trans_bg = getattr(args, "transparent_background", "1.0,1.0,1.0")
self.sgbg = getattr(args, "background_stop_gradient", False)
self.bg_color = BackgroundField(bg_color=self.trans_bg, min_color=self.min_color, stop_grad=self.sgbg)
# MLP specs
self.nerf_style = getattr(args, "nerf_style_mlp", False) # NeRF style MLPs
self.with_ln = not getattr(args, "no_layernorm_mlp", False)
self.skips = getattr(args, "feature_field_skip_connect", None)
self.skips = [self.skips] if self.skips is not None else None
# input specs
self.den_filters, self.den_ori_dims, self.den_input_dims = self.parse_inputs(args.inputs_to_density)
self.tex_filters, self.tex_ori_dims, self.tex_input_dims = self.parse_inputs(args.inputs_to_texture)
self.den_filters, self.tex_filters = nn.ModuleDict(self.den_filters), nn.ModuleDict(self.tex_filters)
# build networks
self.build_feature_field(args)
self.build_density_predictor(args)
self.build_texture_renderer(args)
if getattr(args, "zero_z_steps", 0) > 0:
self.register_buffer("zero_z", torch.scalar_tensor(1)) # it will be saved to checkpoint
else:
self.zero_z = 0
def set_num_updates(self, updates):
self.updates = updates
if getattr(self.args, "zero_z_steps", 0) <= self.updates:
self.zero_z = self.zero_z * 0
def build_feature_field(self, args):
den_feat_dim = self.tex_input_dims[0]
den_input_dim, tex_input_dim = sum(self.den_input_dims), sum(self.tex_input_dims)
if not getattr(args, "hypernetwork", False):
self.feature_field = ImplicitField(
den_input_dim, den_feat_dim, args.feature_embed_dim,
args.feature_layers + 2 if not self.nerf_style else 8, # +2 is to adapt to old code
with_ln=self.with_ln if not self.nerf_style else False,
skips=self.skips if not self.nerf_style else [4],
spec_init=True if not self.nerf_style else False)
else:
assert (not self.nerf_style), "Hypernetwork does not support NeRF style MLPs yet."
den_contxt_dim = self.den_input_dims[-1]
self.feature_field = HyperImplicitField(
den_contxt_dim, den_input_dim - den_contxt_dim,
den_feat_dim, args.feature_embed_dim, args.feature_layers + 2) # +2 is to adapt to old code
def build_density_predictor(self, args):
den_feat_dim = self.tex_input_dims[0]
self.predictor = SignedDistanceField(
den_feat_dim, args.density_embed_dim, recurrent=False, num_layers=1,
with_ln=self.with_ln if not self.nerf_style else False,
spec_init=True if not self.nerf_style else False)
def build_texture_renderer(self, args):
tex_input_dim = sum(self.tex_input_dims)
self.renderer = TextureField(
tex_input_dim, args.texture_embed_dim,
args.texture_layers + 2 if not self.nerf_style else 2,
with_ln=self.with_ln if not self.nerf_style else False,
spec_init=True if not self.nerf_style else False)
def parse_inputs(self, arguments):
def fillup(p):
assert len(p) > 0
default = 'b' if (p[0] != 'ray') and (p[0] != 'normal') else 'a'
if len(p) == 1:
return [p[0], 0, 3, default]
elif len(p) == 2:
return [p[0], int(p[1]), 3, default]
elif len(p) == 3:
return [p[0], int(p[1]), int(p[2]), default]
return [p[0], int(p[1]), int(p[2]), p[3]]
filters, input_dims, output_dims = OrderedDict(), [], []
for p in arguments.split(','):
name, pos_dim, base_dim, pos_type = fillup([a.strip() for a in p.strip().split(':')])
if pos_dim > 0: # use positional embedding
func = NeRFPosEmbLinear(
base_dim, base_dim * pos_dim * 2,
angular=(pos_type == 'a'),
no_linear=True,
cat_input=(pos_type == 'b'))
odim = func.out_dim + func.in_dim if func.cat_input else func.out_dim
else:
func = nn.Identity()
odim = base_dim
input_dims += [base_dim]
output_dims += [odim]
filters[name] = func
return filters, input_dims, output_dims
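# Example: "emb:6:32,pos:4" parses into two entries:
#   emb -> 6 sinusoidal frequencies on a 32-dim input (output dim 32*6*2, plus the raw
#          32 dims since the default 'b' type concatenates the input)
#   pos -> 4 sinusoidal frequencies on the default 3-dim input (output dim 3*4*2 + 3)
# input_dims collects the raw dimensions, output_dims the embedded ones.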
@staticmethod
def add_args(parser):
parser.add_argument('--inputs-to-density', type=str,
help="""
Types of inputs to predict the density.
Choices of types are emb or pos.
use the first : to assign the sinusoidal frequency.
use the second : to assign the input dimension (3 by default).
use the third : to set the type -> basic, angular or gaussian.
Sizes must match.
e.g. --inputs-to-density emb:6:32,pos:4
""")
parser.add_argument('--inputs-to-texture', type=str,
help="""
Types of inputs to predict the texture.
Choices of types are feat, emb, ray, pos or normal.
""")
parser.add_argument('--nerf-style-mlp', action='store_true',
help='use NeRF style MLPs for implicit function (with skip-connection).')
parser.add_argument('--no-layernorm-mlp', action='store_true',
help='do not use layernorm in MLPs.')
parser.add_argument('--feature-field-skip-connect', type=int,
help='add skip-connection in the feature field.')
parser.add_argument('--feature-embed-dim', type=int, metavar='N',
help='field hidden dimension for FFN')
parser.add_argument('--density-embed-dim', type=int, metavar='N',
help='hidden dimension of density prediction')
parser.add_argument('--texture-embed-dim', type=int, metavar='N',
help='hidden dimension of texture prediction')
parser.add_argument('--feature-layers', type=int, metavar='N',
help='number of FC layers used to encode')
parser.add_argument('--texture-layers', type=int, metavar='N',
help='number of FC layers used to predict colors')
parser.add_argument('--no-normalize-normal', action='store_true',
help='if set, do not normalize the gradient of density as the normal direction.')
parser.add_argument('--zero-z-steps', type=int, default=0)
# specific parameters (hypernetwork does not work right now)
parser.add_argument('--hypernetwork', action='store_true',
help='use hypernetwork to model feature')
parser.add_argument('--hyper-feature-embed-dim', type=int, metavar='N',
help='feature dimension used to predict the hypernetwork. consistent with context embedding')
# background parameters
parser.add_argument('--background-depth', type=float,
help='the depth of background. used for depth visualization')
parser.add_argument('--background-stop-gradient', action='store_true',
help='do not optimize the background color')
@torch.enable_grad() # tracking the gradient in case we need to have normal at testing time.
def forward(self, inputs, outputs=['sigma', 'texture']):
filtered_inputs, context = [], None
if inputs.get('feat', None) is None:
for i, name in enumerate(self.den_filters):
d_in, func = self.den_ori_dims[i], self.den_filters[name]
assert (name in inputs), "the encoder must contain target inputs"
assert inputs[name].size(-1) == d_in, "{} dimension must match {} v.s. {}".format(
name, inputs[name].size(-1), d_in)
if name == 'context':
assert (i == (len(self.den_filters) - 1)), "we force context as the last input"
assert inputs[name].size(0) == 1, "context is object level"
context = func(inputs[name])
else:
filtered_inputs += [func(inputs[name])]
filtered_inputs = torch.cat(filtered_inputs, -1)
if context is not None:
if getattr(self.args, "hypernetwork", False):
filtered_inputs = (filtered_inputs, context)
else:
filtered_inputs = (torch.cat([filtered_inputs, context.expand(filtered_inputs.size(0), context.size(1))], -1),)
else:
filtered_inputs = (filtered_inputs, )
inputs['feat'] = self.feature_field(*filtered_inputs)
if 'sigma' in outputs:
assert 'feat' in inputs, "feature must be pre-computed"
inputs['sigma'] = self.predictor(inputs['feat'])[0]
if ('normal' not in inputs) and (
(('texture' in outputs) and ("normal" in self.tex_filters))
or ("normal" in outputs)):
assert 'sigma' in inputs, "sigma must be pre-computed"
assert 'pos' in inputs, "position is used to compute sigma"
grad_pos, = grad(
outputs=inputs['sigma'], inputs=inputs['pos'],
grad_outputs=torch.ones_like(inputs['sigma'], requires_grad=False),
retain_graph=True, create_graph=True)
if not getattr(self.args, "no_normalize_normal", False):
inputs['normal'] = F.normalize(-grad_pos, p=2, dim=1) # BUG: gradient direction reversed.
else:
inputs['normal'] = -grad_pos # no normalization. magnitude also has information?
if 'texture' in outputs:
filtered_inputs = []
if self.zero_z == 1:
inputs['feat'] = inputs['feat'] * 0.0 # zero-out latent feature
inputs['feat_n2'] = (inputs['feat'] ** 2).sum(-1)
for i, name in enumerate(self.tex_filters):
d_in, func = self.tex_ori_dims[i], self.tex_filters[name]
assert (name in inputs), "the encoder must contain target inputs"
filtered_inputs += [func(inputs[name])] if name != 'sigma' else [func(inputs[name].unsqueeze(-1))]
filtered_inputs = torch.cat(filtered_inputs, -1)
inputs['texture'] = self.renderer(filtered_inputs)
if self.min_color == 0:
inputs['texture'] = torch.sigmoid(inputs['texture'])
return inputs
@register_field('sdf_radiance_field')
class SDFRaidanceField(RaidanceField):
@staticmethod
def add_args(parser):
parser.add_argument('--reg-z', action='store_true', help='regularize latent feature')
parser.add_argument('--dropout-z', type=float, default=0.0)
parser.add_argument('--project-to-surface', action='store_true',
help='project any point to the surface to obtain color.')
RaidanceField.add_args(parser)
def build_feature_field(self, args):
den_feat_dim = self.tex_input_dims[0]
den_input_dim, tex_input_dim = sum(self.den_input_dims), sum(self.tex_input_dims)
assert not getattr(args, "hypernetwork", False), "does not support hypernetwork for now"
assert (den_input_dim == 3) or (
self.den_filters['pos'].cat_input and len(self.den_filters) == 1), "cat pos in the end"
num_layers = args.feature_layers + 2 if not self.nerf_style else 8
skips = self.skips if not self.nerf_style else [4]
self.feature_field = ImplicitField(
den_input_dim, den_feat_dim + 1, args.feature_embed_dim, # +1 is for SDF values
num_layers, with_ln=False, skips=skips, outmost_linear=True, spec_init=False)
if getattr(args, "dropout_z", 0.0) > 0.0:
self.dropout_z = nn.Dropout(p=self.args.dropout_z)
else:
self.dropout_z = None
"""
Geometric initialization from https://arxiv.org/pdf/1911.10414.pdf
This encourages the model to approximate an SDF function:
f(x; \theta) \approx |x| - 1
"""
bias = 1.0
for l in range(num_layers):
lin = self.feature_field.net[l]
if l < num_layers - 1:
lin = lin.net[0]
if l == num_layers - 1: # last layer
torch.nn.init.normal_(lin.weight, mean=math.sqrt(math.pi) / math.sqrt(lin.weight.size(1)), std=0.0001)
torch.nn.init.constant_(lin.bias, -bias)
elif l == 0:
torch.nn.init.constant_(lin.bias, 0.0)
if den_input_dim > 3:
torch.nn.init.constant_(lin.weight[:, :-3], 0.0)
torch.nn.init.normal_(lin.weight[:, -3:], 0.0, math.sqrt(2) / math.sqrt(lin.weight.size(0)))
elif (l - 1) in skips:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, math.sqrt(2) / math.sqrt(lin.weight.size(0)))
torch.nn.init.constant_(lin.weight[:, :den_input_dim-3], 0.0)
else:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, math.sqrt(2) / math.sqrt(lin.weight.size(0)))
# force the initial features to 0
self.feature_field.net[7].weight.data[1:] = self.feature_field.net[7].weight.data[1:] * 0.0
self.feature_field.net[7].bias.data[1:] = self.feature_field.net[7].bias.data[1:] * 0.0
def build_density_predictor(self, args):
class Sdf2Densityv1(nn.Module):
def __init__(self):
super().__init__()
self.alpha = nn.Parameter(torch.scalar_tensor(10.0), requires_grad=True)
self.sigma = nn.Parameter(torch.scalar_tensor(50.0), requires_grad=True)
def forward(self, x):
return self.sigma * torch.tanh(-torch.abs(self.alpha) * x[:, 0]), None
class Sdf2Densityv2(nn.Module):
def __init__(self):
super().__init__()
self.sigma = nn.Parameter(torch.scalar_tensor(50.0), requires_grad=True)
# self.alpha = nn.Parameter(torch.scalar_tensor(0.05), requires_grad=True)
def forward(self, x):
return -self.sigma * x[:, 0], None
class Sdf2Densityv3(nn.Module):
def __init__(self):
super().__init__()
self.sigma = nn.Parameter(torch.scalar_tensor(100.0), requires_grad=True)
# self.alpha = nn.Parameter(torch.scalar_tensor(0.05), requires_grad=True)
def forward(self, x):
return F.elu(-self.sigma * x[:, 0]), None
self.predictor = Sdf2Densityv1()
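# These heads turn a signed distance into a volume density for the ray marcher:
# v1 uses sigma * tanh(-|alpha| * sdf), v2 a linear -sigma * sdf, v3 elu(-sigma * sdf).
# In all cases the density is large just inside the surface (sdf < 0) and small or
# negative outside, where the renderer's relu clamps it to zero.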
@torch.enable_grad()
def forward(self, inputs, outputs=['sigma', 'texture']):
if ('sigma' in outputs):
inputs = super().forward(inputs, ['sigma'])
inputs['sdf'] = inputs['feat'][:, 0]
inputs['feat'] = inputs['feat'][:, 1:] # remove sdf from feature
if (getattr(self.args, "zero_z_steps", 0) > self.updates) and self.training:
inputs['feat'] = inputs['feat'] * 0.0 # zero-out latent feature
if self.dropout_z is not None:
inputs['feat'] = self.dropout_z(inputs['feat']) # apply dropout on the feature.
if ('texture' in outputs) or ('normal' in outputs):
# compute gradient for sdf, no need to normalize them
inputs['normal'] = grad(
outputs=inputs['sdf'], inputs=inputs['pos'],
grad_outputs=torch.ones_like(inputs['sdf'], requires_grad=False),
retain_graph=True, create_graph=True)[0]
# compute color for points projected on the surface
if getattr(self.args, "project_to_surface", False):
inputs['pos'] = inputs['pos'] - inputs['sdf'][:, None] * inputs['normal']
inputs['feat'] = None
inputs = super().forward(inputs, outputs=['feat'])
inputs['feat'] = inputs['feat'][:, 1:]
inputs['feat_n2'] = (inputs['feat'] ** 2).sum(-1)
if 'texture' in outputs:
inputs = super().forward(inputs, ['texture'])
return inputs
@register_field('disentangled_radiance_field')
class DisentangledRaidanceField(RaidanceField):
def __init__(self, args):
super().__init__(args)
# for now we fix the input types
assert [name for name in self.tex_filters][:4] == ['feat', 'pos', 'normal', 'ray']
lt_in_dim = self.tex_input_dims[2] + self.tex_input_dims[3]
lg_in_dim = self.tex_input_dims[0] + self.tex_input_dims[1]
if len(self.tex_filters) > 4:
lt_in_dim += sum(self.tex_input_dims[4:])
lg_in_dim += sum(self.tex_input_dims[4:])
# rebuild the renderer
self.D = getattr(args, "compressed_light_dim", 64) # D
self.renderer = nn.ModuleDict(
{
"light-transport": nn.Sequential(
ImplicitField(
in_dim=lt_in_dim,
out_dim=self.D * 3,
hidden_dim=args.texture_embed_dim,
num_layers=args.texture_layers,
outmost_linear=True
), nn.Sigmoid()), # f(v, n, w)
"lighting": nn.Sequential(
ImplicitField(
in_dim=lg_in_dim,
out_dim=self.D * 3,
hidden_dim=args.texture_embed_dim,
num_layers=args.texture_layers,
outmost_linear=True
), nn.ReLU()), # v(x, z, w)
}
)
@staticmethod
def add_args(parser):
RaidanceField.add_args(parser)
parser.add_argument('--compressed-light-dim', type=int,
help='instead of sampling light directions physically, we compressed the light directions')
@torch.enable_grad() # tracking the gradient in case we need to have normal at testing time.
def forward(self, inputs, outputs=['sigma', 'texture']):
h_g, h_brdf, h_l = None, None, None
if inputs.get('context', None) is not None:
h_g, h_brdf, h_l = [inputs['context'][k:k+1] for k in range(3)]
inputs['context'] = h_g
inputs = super().forward(inputs, outputs=['sigma', 'normal'])
if 'texture' in outputs:
lt_inputs = [self.tex_filters['normal'](inputs['normal']), self.tex_filters['ray'](inputs['ray'])]
if h_brdf is not None:
lt_inputs += [self.tex_filters['context'](h_brdf).expand(lt_inputs[0].size(0), -1)]
li_inputs = [self.tex_filters['feat'](inputs['feat']), self.tex_filters['pos'](inputs['pos'])]
if h_l is not None:
li_inputs += [self.tex_filters['context'](h_l).expand(li_inputs[0].size(0), -1)]
lt = self.renderer['light-transport'](torch.cat(lt_inputs, -1)).reshape(-1, self.D, 3)
li = self.renderer['lighting'](torch.cat(li_inputs, -1)).reshape(-1, self.D, 3)
texture = (lt * li).mean(1)
if self.min_color == -1:
texture = 2 * texture - 1
inputs['texture'] = texture
return inputs
| 21,863 | 45.322034 | 131 | py |
NSVF | NSVF-main/fairnr/modules/encoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import numpy as np
import math
import sys
import os
import math
import logging
logger = logging.getLogger(__name__)
from pathlib import Path
from plyfile import PlyData, PlyElement
from fairnr.data.data_utils import load_matrix
from fairnr.data.geometry import (
trilinear_interp, splitting_points, offset_points,
get_edge, build_easy_octree, discretize_points
)
from fairnr.clib import (
aabb_ray_intersect, triangle_ray_intersect, svo_ray_intersect,
uniform_ray_sampling, inverse_cdf_sampling
)
from fairnr.modules.module_utils import (
FCBlock, Linear, Embedding,
InvertableMapping
)
MAX_DEPTH = 10000.0
ENCODER_REGISTRY = {}
def register_encoder(name):
def register_encoder_cls(cls):
if name in ENCODER_REGISTRY:
raise ValueError('Cannot register duplicate module ({})'.format(name))
ENCODER_REGISTRY[name] = cls
return cls
return register_encoder_cls
def get_encoder(name):
if name not in ENCODER_REGISTRY:
raise ValueError('Cannot find module {}'.format(name))
return ENCODER_REGISTRY[name]
@register_encoder('abstract_encoder')
class Encoder(nn.Module):
"""
backbone network
"""
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, **kwargs):
raise NotImplementedError
@staticmethod
def add_args(parser):
pass
@register_encoder('volume_encoder')
class VolumeEncoder(Encoder):
def __init__(self, args):
super().__init__(args)
self.context = None
@staticmethod
def add_args(parser):
parser.add_argument('--near', type=float, help='near distance of the volume')
parser.add_argument('--far', type=float, help='far distance of the volume')
def precompute(self, id=None, context=None, *args, **kwargs):
self.context = context # save context, which may be useful later
return {} # we do not use encoder for NeRF
def ray_intersect(self, ray_start, ray_dir, encoder_states, near=None, far=None):
S, V, P, _ = ray_dir.size()
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
near = near if near is not None else self.args.near
far = far if far is not None else self.args.far
intersection_outputs = {
"min_depth": ray_dir.new_ones(S, V * P, 1) * near,
"max_depth": ray_dir.new_ones(S, V * P, 1) * far,
"probs": ray_dir.new_ones(S, V * P, 1),
"steps": ray_dir.new_ones(S, V * P) * self.args.fixed_num_samples,
"intersected_voxel_idx": ray_dir.new_zeros(S, V * P, 1).int()}
hits = ray_dir.new_ones(S, V * P).bool()
return ray_start, ray_dir, intersection_outputs, hits
def ray_sample(self, intersection_outputs):
sampled_idx, sampled_depth, sampled_dists = inverse_cdf_sampling(
intersection_outputs['intersected_voxel_idx'],
intersection_outputs['min_depth'],
intersection_outputs['max_depth'],
intersection_outputs['probs'],
intersection_outputs['steps'], -1, (not self.training))
return {
'sampled_point_depth': sampled_depth,
'sampled_point_distance': sampled_dists,
'sampled_point_voxel_idx': sampled_idx, # dummy index (to match raymarcher)
}
def forward(self, samples, encoder_states):
inputs = {
'pos': samples['sampled_point_xyz'].requires_grad_(True),
'ray': samples['sampled_point_ray_direction'],
'dists': samples['sampled_point_distance']
}
if self.context is not None:
inputs.update({'context': self.context})
return inputs
@register_encoder('infinite_volume_encoder')
class InfiniteVolumeEncoder(VolumeEncoder):
def __init__(self, args):
super().__init__(args)
self.imap = InvertableMapping(style='simple')
self.nofixdz = getattr(args, "no_fix_dz", False)
self.sample_msi = getattr(args, "sample_msi", False)
@staticmethod
def add_args(parser):
VolumeEncoder.add_args(parser)
parser.add_argument('--no-fix-dz', action='store_true', help='do not fix dz.')
parser.add_argument('--sample-msi', action='store_true')
def ray_intersect(self, ray_start, ray_dir, encoder_states):
S, V, P, _ = ray_dir.size()
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
# ray-sphere (unit) intersection (assuming all cameras are inside the sphere):
p_v = (ray_start * ray_dir).sum(-1)
p_p = (ray_start * ray_start).sum(-1)
d_u = -p_v + torch.sqrt(p_v ** 2 - p_p + 1)
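# Derivation: for a unit direction d and an origin o inside the unit sphere,
#   ||o + t*d||^2 = 1  =>  t^2 + 2*t*(o.d) + (o.o - 1) = 0
#                      =>  t = -(o.d) + sqrt((o.d)^2 - o.o + 1)  (positive root),
# which with p_v = o.d and p_p = o.o is exactly d_u above.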
intersection_outputs = {
"min_depth": torch.arange(-1, 1, 1, dtype=ray_dir.dtype, device=ray_dir.device)[None, None, :].expand(S, V * P, 2),
"max_depth": torch.arange( 0, 2, 1, dtype=ray_dir.dtype, device=ray_dir.device)[None, None, :].expand(S, V * P, 2),
"probs": ray_dir.new_ones(S, V * P, 2) * .5,
"steps": ray_dir.new_ones(S, V * P, 1) * self.args.fixed_num_samples,
"intersected_voxel_idx": torch.arange( 0, 2, 1, device=ray_dir.device)[None, None, :].expand(S, V * P, 2).int(),
"unit_sphere_depth": d_u,
"p_v": p_v, "p_p": p_p}
hits = ray_dir.new_ones(S, V * P).bool()
return ray_start, ray_dir, intersection_outputs, hits
def ray_sample(self, intersection_outputs):
samples = super().ray_sample(intersection_outputs) # HACK: < 1, unit sphere; > 1, outside the sphere
# map from (0, 1) to (0, +inf) with invertable mapping
samples['original_point_distance'] = samples['sampled_point_distance'].clone()
samples['original_point_depth'] = samples['sampled_point_depth'].clone()
# assign correct depth
in_depth = intersection_outputs['unit_sphere_depth'][:, None] * (
samples['original_point_depth'].clamp(max=0.0) + 1.0).masked_fill(samples['sampled_point_voxel_idx'].ne(0), 0)
if not self.sample_msi:
out_depth = (intersection_outputs['unit_sphere_depth'][:, None] + 1 / (1 - samples['original_point_depth'].clamp(min=0.0) + 1e-7) - 1
).masked_fill(samples['sampled_point_voxel_idx'].ne(1), 0)
else:
p_v, p_p = intersection_outputs['p_v'][:, None], intersection_outputs['p_p'][:, None]
out_depth = (-p_v + torch.sqrt(p_v ** 2 - p_p + 1. / (1. - samples['original_point_depth'].clamp(min=0.0) + 1e-7) ** 2)
).masked_fill(samples['sampled_point_voxel_idx'].ne(1), 0)
samples['sampled_point_depth'] = in_depth + out_depth
if not self.nofixdz:
# raise NotImplementedError("need to re-compute later")
in_dists = 1 / intersection_outputs['unit_sphere_depth'][:, None] * (samples['original_point_distance']).masked_fill(
samples['sampled_point_voxel_idx'].ne(0), 0)
alpha = 1. if not self.sample_msi else 1. / torch.sqrt(1. + (p_v ** 2 - p_p) * (1. - samples['original_point_depth'].clamp(min=0.0) + 1e-7) ** 2)
out_dists = alpha / ((1 - samples['original_point_depth'].clamp(min=0.0)) ** 2 + 1e-7) * (samples['original_point_distance']).masked_fill(
samples['sampled_point_voxel_idx'].ne(1), 0)
samples['sampled_point_distance'] = in_dists + out_dists
else:
samples['sampled_point_distance'] = samples['sampled_point_distance'].scatter(1,
samples['sampled_point_voxel_idx'].ne(-1).sum(-1, keepdim=True) - 1, 1e8)
return samples
def forward(self, samples, encoder_states):
field_inputs = super().forward(samples, encoder_states)
r = field_inputs['pos'].norm(p=2, dim=-1, keepdim=True) # .clamp(min=1.0)
field_inputs['pos'] = torch.cat([field_inputs['pos'] / (r + 1e-8), r / (1.0 + r)], dim=-1)
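# The position fed to the field is thus 4-dimensional: a unit direction pos / ||pos|| plus
# a contracted radius r / (1 + r) in (0, 1), keeping unbounded background points in a
# bounded input range (similar in spirit to the inverted-sphere parameterization of NeRF++).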
return field_inputs
@register_encoder('sparsevoxel_encoder')
class SparseVoxelEncoder(Encoder):
def __init__(self, args, voxel_path=None, bbox_path=None, shared_values=None):
super().__init__(args)
# read initial voxels or learned sparse voxels
self.voxel_path = voxel_path if voxel_path is not None else args.voxel_path
self.bbox_path = bbox_path if bbox_path is not None else getattr(args, "initial_boundingbox", None)
assert (self.bbox_path is not None) or (self.voxel_path is not None), \
"at least initial bounding box or pretrained voxel files are required."
self.voxel_index = None
self.scene_scale = getattr(args, "scene_scale", 1.0)
if self.voxel_path is not None:
# read voxel file
assert os.path.exists(self.voxel_path), "voxel file must exist"
if Path(self.voxel_path).suffix == '.ply':
from plyfile import PlyData, PlyElement
plyvoxel = PlyData.read(self.voxel_path)
elements = [x.name for x in plyvoxel.elements]
assert 'vertex' in elements
plydata = plyvoxel['vertex']
fine_points = torch.from_numpy(
np.stack([plydata['x'], plydata['y'], plydata['z']]).astype('float32').T)
if 'face' in elements:
# read voxel meshes... automatically detect voxel size
faces = plyvoxel['face']['vertex_indices']
t = fine_points[faces[0].astype('int64')]
voxel_size = torch.abs(t[0] - t[1]).max()
# indexing voxel vertices
fine_points = torch.unique(fine_points, dim=0)
# vertex_ids, _ = discretize_points(fine_points, voxel_size)
# vertex_ids_offset = vertex_ids + 1
# # simple hashing
# vertex_ids = vertex_ids[:, 0] * 1000000 + vertex_ids[:, 1] * 1000 + vertex_ids[:, 2]
# vertex_ids_offset = vertex_ids_offset[:, 0] * 1000000 + vertex_ids_offset[:, 1] * 1000 + vertex_ids_offset[:, 2]
# vertex_ids = {k: True for k in vertex_ids.tolist()}
# vertex_inside = [v in vertex_ids for v in vertex_ids_offset.tolist()]
# # get voxel centers
# fine_points = fine_points[torch.tensor(vertex_inside)] + voxel_size * .5
# fine_points = fine_points + voxel_size * .5 --> use all corners as centers
else:
# voxel size must be provided
assert getattr(args, "voxel_size", None) is not None, "final voxel size is essential."
voxel_size = args.voxel_size
if 'quality' in elements:
self.voxel_index = torch.from_numpy(plydata['quality']).long()
else:
# supporting the old style .txt voxel points
fine_points = torch.from_numpy(np.loadtxt(self.voxel_path)[:, 3:].astype('float32'))
else:
# read bounding-box file
bbox = np.loadtxt(self.bbox_path)
voxel_size = bbox[-1] if getattr(args, "voxel_size", None) is None else args.voxel_size
fine_points = torch.from_numpy(bbox2voxels(bbox[:6], voxel_size))
half_voxel = voxel_size * .5
# transform from voxel centers to voxel corners (key/values)
fine_coords, _ = discretize_points(fine_points, half_voxel)
fine_keys0 = offset_points(fine_coords, 1.0).reshape(-1, 3)
fine_keys, fine_feats = torch.unique(fine_keys0, dim=0, sorted=True, return_inverse=True)
fine_feats = fine_feats.reshape(-1, 8)
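# offset_points(fine_coords, 1.0) enumerates the 8 corner coordinates of every voxel;
# torch.unique then gives each distinct corner a global id (fine_keys) and fine_feats
# maps every voxel to its 8 corner ids, which later index the shared corner embeddings
# for trilinear interpolation.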
num_keys = torch.scalar_tensor(fine_keys.size(0)).long()
# ray-marching step size
if getattr(args, "raymarching_stepsize_ratio", 0) > 0:
step_size = args.raymarching_stepsize_ratio * voxel_size
else:
step_size = args.raymarching_stepsize
# register parameters (will be saved to checkpoints)
self.register_buffer("points", fine_points) # voxel centers
self.register_buffer("keys", fine_keys.long()) # id used to find voxel corners/embeddings
self.register_buffer("feats", fine_feats.long()) # for each voxel, 8 voxel corner ids
self.register_buffer("num_keys", num_keys)
self.register_buffer("keep", fine_feats.new_ones(fine_feats.size(0)).long()) # whether the voxel will be pruned
self.register_buffer("voxel_size", torch.scalar_tensor(voxel_size))
self.register_buffer("step_size", torch.scalar_tensor(step_size))
self.register_buffer("max_hits", torch.scalar_tensor(args.max_hits))
logger.info("loaded {} voxel centers, {} voxel corners".format(fine_points.size(0), num_keys))
# set-up other hyperparameters and initialize running time caches
self.embed_dim = getattr(args, "voxel_embed_dim", None)
self.deterministic_step = getattr(args, "deterministic_step", False)
self.use_octree = getattr(args, "use_octree", False)
self.track_max_probs = getattr(args, "track_max_probs", False)
self._runtime_caches = {
"flatten_centers": None,
"flatten_children": None,
"max_voxel_probs": None
}
# sparse voxel embeddings
if shared_values is None and self.embed_dim > 0:
self.values = Embedding(num_keys, self.embed_dim, None)
else:
self.values = shared_values
def upgrade_state_dict_named(self, state_dict, name):
# update the voxel embedding shapes
if self.values is not None:
loaded_values = state_dict[name + '.values.weight']
self.values.weight = nn.Parameter(self.values.weight.new_zeros(*loaded_values.size()))
self.values.num_embeddings = self.values.weight.size(0)
self.total_size = self.values.weight.size(0)
self.num_keys = self.num_keys * 0 + self.total_size
if self.voxel_index is not None:
state_dict[name + '.points'] = state_dict[name + '.points'][self.voxel_index]
state_dict[name + '.feats'] = state_dict[name + '.feats'][self.voxel_index]
state_dict[name + '.keep'] = state_dict[name + '.keep'][self.voxel_index]
# update the buffers shapes
if name + '.points' in state_dict:
self.points = self.points.new_zeros(*state_dict[name + '.points'].size())
self.feats = self.feats.new_zeros(*state_dict[name + '.feats'].size())
self.keys = self.keys.new_zeros(*state_dict[name + '.keys'].size())
self.keep = self.keep.new_zeros(*state_dict[name + '.keep'].size())
else:
# this usually happens when loading a NeRF checkpoint to NSVF
# use initialized values
state_dict[name + '.points'] = self.points
state_dict[name + '.feats'] = self.feats
state_dict[name + '.keys'] = self.keys
state_dict[name + '.keep'] = self.keep
state_dict[name + '.voxel_size'] = self.voxel_size
state_dict[name + '.step_size'] = self.step_size
state_dict[name + '.max_hits'] = self.max_hits
state_dict[name + '.num_keys'] = self.num_keys
@staticmethod
def add_args(parser):
parser.add_argument('--initial-boundingbox', type=str, help='the initial bounding box to initialize the model')
parser.add_argument('--voxel-size', type=float, metavar='D', help='voxel size of the input points (initial)')
parser.add_argument('--voxel-path', type=str, help='path for pretrained voxel file. if provided no update')
parser.add_argument('--voxel-embed-dim', type=int, metavar='N', help="embedding size")
parser.add_argument('--deterministic-step', action='store_true',
help='if set, the model runs with a fixed step size instead of sampling one')
parser.add_argument('--max-hits', type=int, metavar='N', help='due to restrictions we set a maximum number of hits')
parser.add_argument('--raymarching-stepsize', type=float, metavar='D',
help='ray marching step size for sparse voxels')
parser.add_argument('--raymarching-stepsize-ratio', type=float, metavar='D',
help='if the concrete step size is not given (=0), we use the ratio to the voxel size as step size.')
parser.add_argument('--use-octree', action='store_true', help='if set, instead of looping over the voxels, we build an octree.')
parser.add_argument('--track-max-probs', action='store_true', help='if set, tracking the maximum probability in ray-marching.')
parser.add_argument('--scene-scale', type=float, default=1.0)
def reset_runtime_caches(self):
logger.info("reset chache")
if self.use_octree:
points = self.points[self.keep.bool()]
centers, children = build_easy_octree(points, self.voxel_size / 2.0)
self._runtime_caches['flatten_centers'] = centers
self._runtime_caches['flatten_children'] = children
if self.track_max_probs:
self._runtime_caches['max_voxel_probs'] = self.points.new_zeros(self.points.size(0))
def clean_runtime_caches(self):
logger.info("clean chache")
for name in self._runtime_caches:
self._runtime_caches[name] = None
def precompute(self, id=None, *args, **kwargs):
feats = self.feats[self.keep.bool()]
points = self.points[self.keep.bool()]
points[:, 0] += (self.voxel_size / 10)
values = self.values.weight[: self.num_keys] if self.values is not None else None
if id is not None:
# extend size to support multi-objects
feats = feats.unsqueeze(0).expand(id.size(0), *feats.size()).contiguous()
points = points.unsqueeze(0).expand(id.size(0), *points.size()).contiguous()
values = values.unsqueeze(0).expand(id.size(0), *values.size()).contiguous() if values is not None else None
# moving to multiple objects
if id.size(0) > 1:
feats = feats + self.num_keys * torch.arange(id.size(0),
device=feats.device, dtype=feats.dtype)[:, None, None]
encoder_states = {
'voxel_vertex_idx': feats,
'voxel_center_xyz': points,
'voxel_vertex_emb': values
}
if self.use_octree:
flatten_centers, flatten_children = self.flatten_centers.clone(), self.flatten_children.clone()
if id is not None:
flatten_centers = flatten_centers.unsqueeze(0).expand(id.size(0), *flatten_centers.size()).contiguous()
flatten_children = flatten_children.unsqueeze(0).expand(id.size(0), *flatten_children.size()).contiguous()
encoder_states['voxel_octree_center_xyz'] = flatten_centers
encoder_states['voxel_octree_children_idx'] = flatten_children
return encoder_states
@torch.no_grad()
def export_voxels(self, return_mesh=False):
logger.info("exporting learned sparse voxels...")
voxel_idx = torch.arange(self.keep.size(0), device=self.keep.device)
voxel_idx = voxel_idx[self.keep.bool()]
voxel_pts = self.points[self.keep.bool()]
if not return_mesh:
# HACK: we export the original voxel indices as "quality" in case of later editing
points = [
(voxel_pts[k, 0], voxel_pts[k, 1], voxel_pts[k, 2], voxel_idx[k])
for k in range(voxel_idx.size(0))
]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('quality', 'f4')])
return PlyData([PlyElement.describe(vertex, 'vertex')])
else:
# generate polygon for voxels
center_coords, residual = discretize_points(voxel_pts, self.voxel_size / 2)
offsets = torch.tensor([[-1,-1,-1],[-1,-1,1],[-1,1,-1],[1,-1,-1],[1,1,-1],[1,-1,1],[-1,1,1],[1,1,1]], device=center_coords.device)
vertex_coords = center_coords[:, None, :] + offsets[None, :, :]
vertex_points = vertex_coords.type_as(residual) * self.voxel_size / 2 + residual
faceidxs = [[1,6,7,5],[7,6,2,4],[5,7,4,3],[1,0,2,6],[1,5,3,0],[0,3,4,2]]
all_vertex_keys, all_vertex_idxs = {}, []
for i in range(vertex_coords.shape[0]):
for j in range(8):
key = " ".join(["{}".format(int(p)) for p in vertex_coords[i,j]])
if key not in all_vertex_keys:
all_vertex_keys[key] = vertex_points[i,j]
all_vertex_idxs += [key]
all_vertex_dicts = {key: u for u, key in enumerate(all_vertex_idxs)}
all_faces = torch.stack([torch.stack([vertex_coords[:, k] for k in f]) for f in faceidxs]).permute(2,0,1,3).reshape(-1,4,3)
all_faces_keys = {}
for l in range(all_faces.size(0)):
key = " ".join(["{}".format(int(p)) for p in all_faces[l].sum(0) // 4])
if key not in all_faces_keys:
all_faces_keys[key] = all_faces[l]
vertex = np.array([tuple(all_vertex_keys[key].cpu().tolist()) for key in all_vertex_idxs], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
face = np.array([([all_vertex_dicts["{} {} {}".format(*b)] for b in a.cpu().tolist()],) for a in all_faces_keys.values()],
dtype=[('vertex_indices', 'i4', (4,))])
return PlyData([PlyElement.describe(vertex, 'vertex'), PlyElement.describe(face, 'face')])
@torch.no_grad()
def export_surfaces(self, field_fn, th, bits):
"""
extract triangle meshes from the implicit field using the marching cubes algorithm
Lewiner, Thomas, et al. "Efficient implementation of marching cubes' cases with topological guarantees."
Journal of graphics tools 8.2 (2003): 1-15.
"""
logger.info("marching cube...")
encoder_states = self.precompute(id=None)
points = encoder_states['voxel_center_xyz']
scores = self.get_scores(field_fn, th=th, bits=bits, encoder_states=encoder_states)
coords, residual = discretize_points(points, self.voxel_size)
A, B, C = [s + 1 for s in coords.max(0).values.cpu().tolist()]
# prepare grids
full_grids = points.new_ones(A * B * C, bits ** 3)
full_grids[coords[:, 0] * B * C + coords[:, 1] * C + coords[:, 2]] = scores
full_grids = full_grids.reshape(A, B, C, bits, bits, bits)
full_grids = full_grids.permute(0, 3, 1, 4, 2, 5).reshape(A * bits, B * bits, C * bits)
full_grids = 1 - full_grids
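# full_grids is now a dense (A*bits, B*bits, C*bits) volume: each kept voxel contributes
# a bits^3 block of scores, empty space keeps the default score of 1, and the 1 - score
# flip converts "probability of being empty" into occupancy so that level=0.5 extracts
# the surface.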
# marching cube
from skimage import measure
space_step = self.voxel_size.item() / bits
verts, faces, normals, _ = measure.marching_cubes_lewiner(
volume=full_grids.cpu().numpy(), level=0.5,
spacing=(space_step, space_step, space_step)
)
verts += (residual - (self.voxel_size / 2)).cpu().numpy()
verts = np.array([tuple(a) for a in verts.tolist()], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
faces = np.array([(a, ) for a in faces.tolist()], dtype=[('vertex_indices', 'i4', (3,))])
return PlyData([PlyElement.describe(verts, 'vertex'), PlyElement.describe(faces, 'face')])
def get_edge(self, ray_start, ray_dir, samples, encoder_states):
outs = get_edge(
ray_start + ray_dir * samples['sampled_point_depth'][:, :1],
encoder_states['voxel_center_xyz'].reshape(-1, 3)[samples['sampled_point_voxel_idx'][:, 0].long()],
self.voxel_size).type_as(ray_dir) # get voxel edges/depth (for visualization)
outs = (1 - outs[:, None].expand(outs.size(0), 3)) * 0.7
return outs
def ray_intersect(self, ray_start, ray_dir, encoder_states):
point_feats = encoder_states['voxel_vertex_idx']
point_xyz = encoder_states['voxel_center_xyz']
S, V, P, _ = ray_dir.size()
_, H, D = point_feats.size()
# ray-voxel intersection
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
if self.use_octree: # ray-voxel intersection with SVO
flatten_centers = encoder_states['voxel_octree_center_xyz']
flatten_children = encoder_states['voxel_octree_children_idx']
pts_idx, min_depth, max_depth = svo_ray_intersect(
self.voxel_size, self.max_hits, flatten_centers, flatten_children,
ray_start, ray_dir)
else: # ray-voxel intersection with all voxels
pts_idx, min_depth, max_depth = aabb_ray_intersect(
self.voxel_size, self.max_hits, point_xyz, ray_start, ray_dir)
# sort the depths
min_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
max_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
min_depth, sorted_idx = min_depth.sort(dim=-1)
max_depth = max_depth.gather(-1, sorted_idx)
pts_idx = pts_idx.gather(-1, sorted_idx)
hits = pts_idx.ne(-1).any(-1) # remove all points that completely miss the object
if S > 1: # extend the point-index to multiple shapes (just in case)
pts_idx = (pts_idx + H * torch.arange(S,
device=pts_idx.device, dtype=pts_idx.dtype)[:, None, None]
).masked_fill_(pts_idx.eq(-1), -1)
intersection_outputs = {
"min_depth": min_depth,
"max_depth": max_depth,
"intersected_voxel_idx": pts_idx
}
return ray_start, ray_dir, intersection_outputs, hits
def ray_sample(self, intersection_outputs):
# sample points and use middle point approximation
sampled_idx, sampled_depth, sampled_dists = inverse_cdf_sampling(
intersection_outputs['intersected_voxel_idx'],
intersection_outputs['min_depth'],
intersection_outputs['max_depth'],
intersection_outputs['probs'],
intersection_outputs['steps'],
-1, self.deterministic_step or (not self.training))
sampled_dists = sampled_dists.clamp(min=0.0)
sampled_depth.masked_fill_(sampled_idx.eq(-1), MAX_DEPTH)
sampled_dists.masked_fill_(sampled_idx.eq(-1), 0.0)
samples = {
'sampled_point_depth': sampled_depth,
'sampled_point_distance': sampled_dists,
'sampled_point_voxel_idx': sampled_idx,
}
return samples
@torch.enable_grad()
def forward(self, samples, encoder_states):
# encoder states
point_feats = encoder_states['voxel_vertex_idx']
point_xyz = encoder_states['voxel_center_xyz']
values = encoder_states['voxel_vertex_emb']
# ray point samples
sampled_idx = samples['sampled_point_voxel_idx'].long()
sampled_xyz = samples['sampled_point_xyz'].requires_grad_(True)
sampled_dir = samples['sampled_point_ray_direction']
sampled_dis = samples['sampled_point_distance']
# prepare inputs for implicit field
# / self.scene_scale
inputs = {
'pos': sampled_xyz,
'ray': sampled_dir,
'dists': sampled_dis}
# --- just for debugging ---- #
# r = inputs['pos'].norm(p=2, dim=-1, keepdim=True)
# inputs['pos'] = torch.cat([inputs['pos'] / (r + 1e-8), r / (1 + r)], dim=-1)
if values is not None:
# resample point features
point_xyz = F.embedding(sampled_idx, point_xyz)
point_feats = F.embedding(F.embedding(sampled_idx, point_feats), values).view(point_xyz.size(0), -1)
# tri-linear interpolation
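            # p is the sample position inside its voxel, normalized to roughly [0, 1]^3;
            # q enumerates the eight voxel corners in the same normalized frame, so
            # trilinear_interp blends the eight vertex embeddings gathered above.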
p = ((sampled_xyz - point_xyz) / self.voxel_size + .5).unsqueeze(1)
q = offset_points(p, .5, offset_only=True).unsqueeze(0) + .5 # BUG (FIX)
inputs.update({'emb': trilinear_interp(p, q, point_feats)})
return inputs
@torch.no_grad()
def track_voxel_probs(self, voxel_idxs, voxel_probs):
voxel_idxs = voxel_idxs.masked_fill(voxel_idxs.eq(-1), self.max_voxel_probs.size(0))
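        # missed samples (index -1) are redirected to an extra scratch bin appended at
        # position max_voxel_probs.size(0); it collects their probs below and is then
        # dropped again by the [:-1] slice.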
chunk_size = 4096
for start in range(0, voxel_idxs.size(0), chunk_size):
end = start + chunk_size
end = end if end < voxel_idxs.size(0) else voxel_idxs.size(0)
max_voxel_probs = self.max_voxel_probs.new_zeros(end-start, self.max_voxel_probs.size(0) + 1).scatter_add_(
dim=-1, index=voxel_idxs[start:end], src=voxel_probs[start:end]).max(0)[0][:-1].data
self.max_voxel_probs = torch.max(self.max_voxel_probs, max_voxel_probs)
@torch.no_grad()
def pruning(self, field_fn, th=0.5, encoder_states=None, train_stats=False):
if not train_stats:
logger.info("pruning...")
scores = self.get_scores(field_fn, th=th, bits=16, encoder_states=encoder_states)
keep = (1 - scores.min(-1)[0]) > th
else:
logger.info("pruning based on training set statics (e.g. probs)...")
if dist.is_initialized() and dist.get_world_size() > 1: # sync on multi-gpus
dist.all_reduce(self.max_voxel_probs, op=dist.ReduceOp.MAX)
keep = self.max_voxel_probs > th
self.keep.masked_scatter_(self.keep.bool(), keep.long())
logger.info("pruning done. # of voxels before: {}, after: {} voxels".format(keep.size(0), keep.sum()))
def get_scores(self, field_fn, th=0.5, bits=16, encoder_states=None):
if encoder_states is None:
encoder_states = self.precompute(id=None)
feats = encoder_states['voxel_vertex_idx']
points = encoder_states['voxel_center_xyz']
values = encoder_states['voxel_vertex_emb']
chunk_size = 64
def get_scores_once(feats, points, values):
# sample points inside voxels
sampled_xyz = offset_points(points, self.voxel_size / 2.0, bits=bits)
sampled_idx = torch.arange(points.size(0), device=points.device)[:, None].expand(*sampled_xyz.size()[:2])
sampled_xyz, sampled_idx = sampled_xyz.reshape(-1, 3), sampled_idx.reshape(-1)
field_inputs = self.forward(
{'sampled_point_xyz': sampled_xyz,
'sampled_point_voxel_idx': sampled_idx,
'sampled_point_ray_direction': None,
'sampled_point_distance': None},
{'voxel_vertex_idx': feats,
'voxel_center_xyz': points,
'voxel_vertex_emb': values}) # get field inputs
if encoder_states.get('context', None) is not None:
field_inputs['context'] = encoder_states['context']
# evaluation with density
field_outputs = field_fn(field_inputs, outputs=['sigma'])
free_energy = -torch.relu(field_outputs['sigma']).reshape(-1, bits ** 3)
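            # exp(-relu(sigma)) is roughly the transmittance of a unit-length step, so a
            # score close to 1 means the sampled location is (nearly) empty space.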
# return scores
return torch.exp(free_energy)
return torch.cat([get_scores_once(feats[i: i + chunk_size], points[i: i + chunk_size], values)
for i in range(0, points.size(0), chunk_size)], 0)
@torch.no_grad()
def splitting(self):
logger.info("splitting...")
encoder_states = self.precompute(id=None)
feats, points, values = encoder_states['voxel_vertex_idx'], encoder_states['voxel_center_xyz'], encoder_states['voxel_vertex_emb']
new_points, new_feats, new_values, new_keys = splitting_points(points, feats, values, self.voxel_size / 2.0)
new_num_keys = new_keys.size(0)
new_point_length = new_points.size(0)
# set new voxel embeddings
if new_values is not None:
self.values.weight = nn.Parameter(new_values)
self.values.num_embeddings = self.values.weight.size(0)
self.total_size = new_num_keys
self.num_keys = self.num_keys * 0 + self.total_size
self.points = new_points
self.feats = new_feats
self.keep = self.keep.new_ones(new_point_length)
logger.info("splitting done. # of voxels before: {}, after: {} voxels".format(points.size(0), self.keep.sum()))
@property
def flatten_centers(self):
if self._runtime_caches['flatten_centers'] is None:
self.reset_runtime_caches()
return self._runtime_caches['flatten_centers']
@property
def flatten_children(self):
if self._runtime_caches['flatten_children'] is None:
self.reset_runtime_caches()
return self._runtime_caches['flatten_children']
@property
def max_voxel_probs(self):
if self._runtime_caches['max_voxel_probs'] is None:
self.reset_runtime_caches()
return self._runtime_caches['max_voxel_probs']
@max_voxel_probs.setter
def max_voxel_probs(self, x):
self._runtime_caches['max_voxel_probs'] = x
@property
def feature_dim(self):
return self.embed_dim
@property
def dummy_loss(self):
if self.values is not None:
return self.values.weight[0,0] * 0.0
return 0.0
@property
def num_voxels(self):
return self.keep.long().sum()
@register_encoder('multi_sparsevoxel_encoder')
class MultiSparseVoxelEncoder(Encoder):
def __init__(self, args):
super().__init__(args)
try:
self.all_voxels = nn.ModuleList(
[SparseVoxelEncoder(args, vox.strip()) for vox in open(args.voxel_path).readlines()])
except TypeError:
bbox_path = getattr(args, "bbox_path", "/private/home/jgu/data/shapenet/disco_dataset/bunny_point.txt")
self.all_voxels = nn.ModuleList(
[SparseVoxelEncoder(args, None, g.strip() + '/bbox.txt') for g in open(bbox_path).readlines()])
# properties
self.deterministic_step = getattr(args, "deterministic_step", False)
self.use_octree = getattr(args, "use_octree", False)
self.track_max_probs = getattr(args, "track_max_probs", False)
self.cid = None
if getattr(self.args, "global_embeddings", None) is not None:
self.global_embed = torch.zeros(*eval(self.args.global_embeddings)).normal_(mean=0, std=0.01)
self.global_embed = nn.Parameter(self.global_embed, requires_grad=True)
else:
self.global_embed = None
@staticmethod
def add_args(parser):
SparseVoxelEncoder.add_args(parser)
parser.add_argument('--bbox-path', type=str, default=None)
parser.add_argument('--global-embeddings', type=str, default=None,
help="""set global embeddings if provided in global.txt. We follow this format:
(N, D) or (K, N, D) if we have multi-dimensional global features.
                                D is the global feature dimension.
N is the number of indices of this feature,
and K is the number of features if provided.""")
def reset_runtime_caches(self):
for id in range(len(self.all_voxels)):
self.all_voxels[id].reset_runtime_caches()
def clean_runtime_caches(self):
for id in range(len(self.all_voxels)):
self.all_voxels[id].clean_runtime_caches()
def precompute(self, id, global_index=None, *args, **kwargs):
# TODO: this is a HACK for simplicity
assert id.size(0) == 1, "for now, only works for one object"
# HACK
# id = id * 0 + 2
self.cid = id[0]
encoder_states = self.all_voxels[id[0]].precompute(id, *args, **kwargs)
if (global_index is not None) and (self.global_embed is not None):
encoder_states['context'] = torch.stack([
F.embedding(global_index[:, i], self.global_embed[i])
for i in range(self.global_embed.size(0))], 1)
return encoder_states
def export_surfaces(self, field_fn, th, bits):
raise NotImplementedError("does not support for now.")
def export_voxels(self, return_mesh=False):
raise NotImplementedError("does not support for now.")
def get_edge(self, *args, **kwargs):
return self.all_voxels[self.cid].get_edge(*args, **kwargs)
def ray_intersect(self, *args, **kwargs):
return self.all_voxels[self.cid].ray_intersect(*args, **kwargs)
def ray_sample(self, *args, **kwargs):
return self.all_voxels[self.cid].ray_sample(*args, **kwargs)
def forward(self, samples, encoder_states):
inputs = self.all_voxels[self.cid].forward(samples, encoder_states)
if encoder_states.get('context', None) is not None:
inputs['context'] = encoder_states['context']
return inputs
def track_voxel_probs(self, voxel_idxs, voxel_probs):
return self.all_voxels[self.cid].track_voxel_probs(voxel_idxs, voxel_probs)
@torch.no_grad()
def pruning(self, field_fn, th=0.5, train_stats=False):
for id in range(len(self.all_voxels)):
self.all_voxels[id].pruning(field_fn, th, train_stats=train_stats)
@torch.no_grad()
def splitting(self):
for id in range(len(self.all_voxels)):
self.all_voxels[id].splitting()
@property
def feature_dim(self):
return self.all_voxels[0].embed_dim
@property
def dummy_loss(self):
return sum([d.dummy_loss for d in self.all_voxels])
@property
def voxel_size(self):
return self.all_voxels[0].voxel_size
@voxel_size.setter
def voxel_size(self, x):
for id in range(len(self.all_voxels)):
self.all_voxels[id].voxel_size = x
@property
def step_size(self):
return self.all_voxels[0].step_size
@step_size.setter
def step_size(self, x):
for id in range(len(self.all_voxels)):
self.all_voxels[id].step_size = x
@property
def max_hits(self):
return self.all_voxels[0].max_hits
@max_hits.setter
def max_hits(self, x):
for id in range(len(self.all_voxels)):
self.all_voxels[id].max_hits = x
@property
def num_voxels(self):
return self.all_voxels[self.cid].num_voxels
@register_encoder('shared_sparsevoxel_encoder')
class SharedSparseVoxelEncoder(MultiSparseVoxelEncoder):
"""
    Unlike MultiSparseVoxelEncoder, a single list of voxels is shared
    across all models. This is typically useful for learning a video sequence.
"""
def __init__(self, args):
super(MultiSparseVoxelEncoder, self).__init__(args)
# using a shared voxel
self.voxel_path = args.voxel_path
self.num_frames = args.num_frames
self.all_voxels = [SparseVoxelEncoder(args, self.voxel_path)]
self.all_voxels = nn.ModuleList(self.all_voxels + [
SparseVoxelEncoder(args, self.voxel_path, shared_values=self.all_voxels[0].values)
for i in range(self.num_frames - 1)])
self.context_embed_dim = args.context_embed_dim
self.contexts = nn.Embedding(self.num_frames, self.context_embed_dim, None)
self.cid = None
@staticmethod
def add_args(parser):
SparseVoxelEncoder.add_args(parser)
parser.add_argument('--num-frames', type=int, help='the total number of frames')
parser.add_argument('--context-embed-dim', type=int, help='context embedding for each view')
def forward(self, samples, encoder_states):
inputs = self.all_voxels[self.cid].forward(samples, encoder_states)
inputs.update({'context': self.contexts(self.cid).unsqueeze(0)})
return inputs
@torch.no_grad()
def pruning(self, field_fn, th=0.5, train_stats=False):
for cid in range(len(self.all_voxels)):
id = torch.tensor([cid], device=self.contexts.weight.device)
encoder_states = {name: v[0] if v is not None else v
for name, v in self.precompute(id).items()}
encoder_states['context'] = self.contexts(id)
self.all_voxels[cid].pruning(field_fn, th,
encoder_states=encoder_states,
train_stats=train_stats)
@torch.no_grad()
def splitting(self):
logger.info("splitting...")
all_feats, all_points = [], []
for id in range(len(self.all_voxels)):
encoder_states = self.all_voxels[id].precompute(id=None)
feats = encoder_states['voxel_vertex_idx']
points = encoder_states['voxel_center_xyz']
values = encoder_states['voxel_vertex_emb']
all_feats.append(feats)
all_points.append(points)
feats, points = torch.cat(all_feats, 0), torch.cat(all_points, 0)
unique_feats, unique_idx = torch.unique(feats, dim=0, return_inverse=True)
unique_points = points[
unique_feats.new_zeros(unique_feats.size(0)).scatter_(
0, unique_idx, torch.arange(unique_idx.size(0), device=unique_feats.device)
)]
new_points, new_feats, new_values, new_keys = splitting_points(unique_points, unique_feats, values, self.voxel_size / 2.0)
new_num_keys = new_keys.size(0)
new_point_length = new_points.size(0)
# set new voxel embeddings (shared voxels)
if values is not None:
self.all_voxels[0].values.weight = nn.Parameter(new_values)
self.all_voxels[0].values.num_embeddings = new_num_keys
for id in range(len(self.all_voxels)):
self.all_voxels[id].total_size = new_num_keys
self.all_voxels[id].num_keys = self.all_voxels[id].num_keys * 0 + self.all_voxels[id].total_size
self.all_voxels[id].points = new_points
self.all_voxels[id].feats = new_feats
self.all_voxels[id].keep = self.all_voxels[id].keep.new_ones(new_point_length)
logger.info("splitting done. # of voxels before: {}, after: {} voxels".format(
unique_points.size(0), new_point_length))
@property
def feature_dim(self):
return self.all_voxels[0].embed_dim + self.context_embed_dim
@register_encoder('triangle_mesh_encoder')
class TriangleMeshEncoder(SparseVoxelEncoder):
"""
    Trains on a fixed mesh model. Pruning is not supported.
"""
def __init__(self, args, mesh_path=None, shared_values=None):
super(SparseVoxelEncoder, self).__init__(args)
self.mesh_path = mesh_path if mesh_path is not None else args.mesh_path
assert (self.mesh_path is not None) and os.path.exists(self.mesh_path)
import open3d as o3d
mesh = o3d.io.read_triangle_mesh(self.mesh_path)
vertices = torch.from_numpy(np.asarray(mesh.vertices, dtype=np.float32))
        faces = torch.from_numpy(np.asarray(mesh.triangles, dtype=np.int64))
step_size = args.raymarching_stepsize
if getattr(args, "raymarching_margin", None) is None:
margin = step_size * 10 # truncated space around the triangle surfaces
else:
margin = args.raymarching_margin
self.register_buffer("margin", torch.scalar_tensor(margin))
self.register_buffer("step_size", torch.scalar_tensor(step_size))
self.register_buffer("max_hits", torch.scalar_tensor(args.max_hits))
self.vertices = nn.Parameter(vertices, requires_grad=getattr(args, "trainable_vertices", False))
self.faces = nn.Parameter(faces, requires_grad=False)
# set-up other hyperparameters
self.embed_dim = getattr(args, "voxel_embed_dim", None)
self.deterministic_step = getattr(args, "deterministic_step", False)
self.values = None
self.blur_ratio = getattr(args, "blur_ratio", 0.0)
def upgrade_state_dict_named(self, state_dict, name):
pass
@staticmethod
def add_args(parser):
parser.add_argument('--mesh-path', type=str, help='path for initial mesh file')
parser.add_argument('--voxel-embed-dim', type=int, metavar='N', help="embedding size")
parser.add_argument('--deterministic-step', action='store_true',
help='if set, the model runs fixed stepsize, instead of sampling one')
parser.add_argument('--max-hits', type=int, metavar='N', help='due to restrictions we set a maximum number of hits')
parser.add_argument('--raymarching-stepsize', type=float, metavar='D',
help='ray marching step size for sparse voxels')
parser.add_argument('--raymarching-margin', type=float, default=None,
help='margin around the surface.')
parser.add_argument('--blur-ratio', type=float, default=0,
help="it is possible to shoot outside the triangle. default=0")
parser.add_argument('--trainable-vertices', action='store_true',
                            help='if set, make the triangle vertices trainable. experimental code; not ideal.')
def precompute(self, id=None, *args, **kwargs):
feats, points, values = self.faces, self.vertices, self.values
if id is not None:
# extend size to support multi-objects
feats = feats.unsqueeze(0).expand(id.size(0), *feats.size()).contiguous()
points = points.unsqueeze(0).expand(id.size(0), *points.size()).contiguous()
values = values.unsqueeze(0).expand(id.size(0), *values.size()).contiguous() if values is not None else None
# moving to multiple objects
if id.size(0) > 1:
feats = feats + points.size(1) * torch.arange(id.size(0),
device=feats.device, dtype=feats.dtype)[:, None, None]
encoder_states = {
'mesh_face_vertex_idx': feats,
'mesh_vertex_xyz': points,
}
return encoder_states
def get_edge(self, ray_start, ray_dir, *args, **kwargs):
return torch.ones_like(ray_dir) * 0.7
@property
def voxel_size(self):
return self.margin
def ray_intersect(self, ray_start, ray_dir, encoder_states):
point_xyz = encoder_states['mesh_vertex_xyz']
point_feats = encoder_states['mesh_face_vertex_idx']
S, V, P, _ = ray_dir.size()
F, G = point_feats.size(1), point_xyz.size(1)
# ray-voxel intersection
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
pts_idx, depth, uv = triangle_ray_intersect(
self.margin, self.blur_ratio, self.max_hits, point_xyz, point_feats, ray_start, ray_dir)
min_depth = (depth[:,:,:,0] + depth[:,:,:,1]).masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
max_depth = (depth[:,:,:,0] + depth[:,:,:,2]).masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
hits = pts_idx.ne(-1).any(-1) # remove all points that completely miss the object
if S > 1: # extend the point-index to multiple shapes (just in case)
pts_idx = (pts_idx + G * torch.arange(S,
device=pts_idx.device, dtype=pts_idx.dtype)[:, None, None]
).masked_fill_(pts_idx.eq(-1), -1)
intersection_outputs = {
"min_depth": min_depth,
"max_depth": max_depth,
"intersected_voxel_idx": pts_idx
}
return ray_start, ray_dir, intersection_outputs, hits
@torch.enable_grad()
def forward(self, samples, encoder_states):
return {
'pos': samples['sampled_point_xyz'].requires_grad_(True),
'ray': samples['sampled_point_ray_direction'],
'dists': samples['sampled_point_distance']
}
@property
def num_voxels(self):
return self.vertices.size(0)
def bbox2voxels(bbox, voxel_size):
vox_min, vox_max = bbox[:3], bbox[3:]
steps = ((vox_max - vox_min) / voxel_size).round().astype('int64') + 1
x, y, z = [c.reshape(-1).astype('float32') for c in np.meshgrid(np.arange(steps[0]), np.arange(steps[1]), np.arange(steps[2]))]
x, y, z = x * voxel_size + vox_min[0], y * voxel_size + vox_min[1], z * voxel_size + vox_min[2]
return np.stack([x, y, z]).T.astype('float32')
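# Usage sketch (illustrative numbers only, not taken from any shipped config): turning a
# unit-cube bounding box into a dense grid of candidate voxel centers.
#
#   >>> import numpy as np
#   >>> bbox = np.array([0., 0., 0., 1., 1., 1.], dtype='float32')
#   >>> centers = bbox2voxels(bbox, voxel_size=0.25)
#   >>> centers.shape      # (125, 3): round(1 / 0.25) + 1 = 5 steps per axis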
| 49,252 | 45.377589 | 157 |
py
|
NSVF
|
NSVF-main/fairnr/modules/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('fairnr.modules.' + model_name)
| 651 | 42.466667 | 111 |
py
|
NSVF
|
NSVF-main/fairnr/modules/hyper.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''
Pytorch implementations of hyper-network modules.
This code is largely adapted from
https://github.com/vsitzmann/scene-representation-networks
'''
import torch
import torch.nn as nn
import functools
from fairnr.modules.module_utils import FCBlock
def partialclass(cls, *args, **kwds):
class NewCls(cls):
__init__ = functools.partialmethod(cls.__init__, *args, **kwds)
return NewCls
class LookupLayer(nn.Module):
def __init__(self, in_ch, out_ch, num_objects):
super().__init__()
self.out_ch = out_ch
self.lookup_lin = LookupLinear(in_ch,
out_ch,
num_objects=num_objects)
self.norm_nl = nn.Sequential(
nn.LayerNorm([self.out_ch], elementwise_affine=False),
nn.ReLU(inplace=True)
)
def forward(self, obj_idx):
net = nn.Sequential(
self.lookup_lin(obj_idx),
self.norm_nl
)
return net
class LookupFC(nn.Module):
def __init__(self,
hidden_ch,
num_hidden_layers,
num_objects,
in_ch,
out_ch,
outermost_linear=False):
super().__init__()
self.layers = nn.ModuleList()
self.layers.append(LookupLayer(in_ch=in_ch, out_ch=hidden_ch, num_objects=num_objects))
for i in range(num_hidden_layers):
self.layers.append(LookupLayer(in_ch=hidden_ch, out_ch=hidden_ch, num_objects=num_objects))
if outermost_linear:
self.layers.append(LookupLinear(in_ch=hidden_ch, out_ch=out_ch, num_objects=num_objects))
else:
self.layers.append(LookupLayer(in_ch=hidden_ch, out_ch=out_ch, num_objects=num_objects))
def forward(self, obj_idx):
net = []
for i in range(len(self.layers)):
net.append(self.layers[i](obj_idx))
return nn.Sequential(*net)
class LookupLinear(nn.Module):
def __init__(self,
in_ch,
out_ch,
num_objects):
super().__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.hypo_params = nn.Embedding(num_objects, in_ch * out_ch + out_ch)
for i in range(num_objects):
nn.init.kaiming_normal_(self.hypo_params.weight.data[i, :self.in_ch * self.out_ch].view(self.out_ch, self.in_ch),
a=0.0,
nonlinearity='relu',
mode='fan_in')
self.hypo_params.weight.data[i, self.in_ch * self.out_ch:].fill_(0.)
def forward(self, obj_idx):
hypo_params = self.hypo_params(obj_idx)
        # Indices explicit to catch errors in shape of output layer
weights = hypo_params[..., :self.in_ch * self.out_ch]
biases = hypo_params[..., self.in_ch * self.out_ch:(self.in_ch * self.out_ch)+self.out_ch]
biases = biases.view(*(biases.size()[:-1]), 1, self.out_ch)
weights = weights.view(*(weights.size()[:-1]), self.out_ch, self.in_ch)
return BatchLinear(weights=weights, biases=biases)
class HyperLayer(nn.Module):
'''A hypernetwork that predicts a single Dense Layer, including LayerNorm and a ReLU.'''
def __init__(self,
in_ch,
out_ch,
hyper_in_ch,
hyper_num_hidden_layers,
hyper_hidden_ch):
super().__init__()
self.hyper_linear = HyperLinear(in_ch=in_ch,
out_ch=out_ch,
hyper_in_ch=hyper_in_ch,
hyper_num_hidden_layers=hyper_num_hidden_layers,
hyper_hidden_ch=hyper_hidden_ch)
self.norm_nl = nn.Sequential(
nn.LayerNorm([out_ch], elementwise_affine=False),
nn.ReLU(inplace=True)
)
def forward(self, hyper_input):
'''
:param hyper_input: input to hypernetwork.
:return: nn.Module; predicted fully connected network.
'''
return nn.Sequential(self.hyper_linear(hyper_input), self.norm_nl)
class HyperFC(nn.Module):
'''Builds a hypernetwork that predicts a fully connected neural network.
'''
def __init__(self,
hyper_in_ch,
hyper_num_hidden_layers,
hyper_hidden_ch,
hidden_ch,
num_hidden_layers,
in_ch,
out_ch,
outermost_linear=False):
super().__init__()
PreconfHyperLinear = partialclass(HyperLinear,
hyper_in_ch=hyper_in_ch,
hyper_num_hidden_layers=hyper_num_hidden_layers,
hyper_hidden_ch=hyper_hidden_ch)
PreconfHyperLayer = partialclass(HyperLayer,
hyper_in_ch=hyper_in_ch,
hyper_num_hidden_layers=hyper_num_hidden_layers,
hyper_hidden_ch=hyper_hidden_ch)
self.layers = nn.ModuleList()
self.layers.append(PreconfHyperLayer(in_ch=in_ch, out_ch=hidden_ch))
for i in range(num_hidden_layers):
self.layers.append(PreconfHyperLayer(in_ch=hidden_ch, out_ch=hidden_ch))
if outermost_linear:
self.layers.append(PreconfHyperLinear(in_ch=hidden_ch, out_ch=out_ch))
else:
self.layers.append(PreconfHyperLayer(in_ch=hidden_ch, out_ch=out_ch))
def forward(self, hyper_input):
'''
:param hyper_input: Input to hypernetwork.
:return: nn.Module; Predicted fully connected neural network.
'''
net = []
for i in range(len(self.layers)):
net.append(self.layers[i](hyper_input))
return nn.Sequential(*net)
class BatchLinear(nn.Module):
def __init__(self,
weights,
biases):
'''Implements a batch linear layer.
:param weights: Shape: (batch, out_ch, in_ch)
:param biases: Shape: (batch, 1, out_ch)
'''
super().__init__()
self.weights = weights
self.biases = biases
def __repr__(self):
return "BatchLinear(batch=%d, in_ch=%d, out_ch=%d)"%(
self.weights.shape[0], self.weights.shape[-1], self.weights.shape[-2])
def forward(self, input):
output = input.matmul(self.weights.permute(*[i for i in range(len(self.weights.shape)-2)], -1, -2))
output += self.biases
return output
def last_hyper_layer_init(m):
if type(m) == nn.Linear:
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
m.weight.data *= 1e-1
class HyperLinear(nn.Module):
'''A hypernetwork that predicts a single linear layer (weights & biases).'''
def __init__(self,
in_ch,
out_ch,
hyper_in_ch,
hyper_num_hidden_layers,
hyper_hidden_ch):
super().__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.hypo_params = FCBlock(
in_features=hyper_in_ch,
hidden_ch=hyper_hidden_ch,
num_hidden_layers=hyper_num_hidden_layers,
out_features=(in_ch * out_ch) + out_ch,
outermost_linear=True)
self.hypo_params[-1].apply(last_hyper_layer_init)
def forward(self, hyper_input):
hypo_params = self.hypo_params(hyper_input.cuda())
        # Indices explicit to catch errors in shape of output layer
weights = hypo_params[..., :self.in_ch * self.out_ch]
biases = hypo_params[..., self.in_ch * self.out_ch:(self.in_ch * self.out_ch)+self.out_ch]
biases = biases.view(*(biases.size()[:-1]), 1, self.out_ch)
weights = weights.view(*(weights.size()[:-1]), self.out_ch, self.in_ch)
return BatchLinear(weights=weights, biases=biases)
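# Usage sketch (shapes are illustrative assumptions, not defaults used elsewhere in fairnr):
#
#   >>> hyper = HyperFC(hyper_in_ch=64, hyper_num_hidden_layers=1, hyper_hidden_ch=256,
#   ...                 hidden_ch=128, num_hidden_layers=2, in_ch=3, out_ch=4)
#   >>> net = hyper(latent)        # latent: (B, 64) -> nn.Sequential of predicted layers
#   >>> out = net(points)          # points: (B, N, 3) -> (B, N, 4) via BatchLinear
#
#   (note that HyperLinear.forward calls .cuda() on its input, so the latent code is
#   expected to live on, or be movable to, the GPU.)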
| 8,327 | 32.991837 | 125 |
py
|
NSVF
|
NSVF-main/fairnr/modules/module_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules import LayerNorm
from fairseq.utils import get_activation_fn
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
return m
class PosEmbLinear(nn.Module):
def __init__(self, in_dim, out_dim, no_linear=False, scale=1, *args, **kwargs):
super().__init__()
        assert out_dim % (2 * in_dim) == 0, "dimension must be divisible"
half_dim = out_dim // 2 // in_dim
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
self.emb = nn.Parameter(emb, requires_grad=False)
self.linear = Linear(out_dim, out_dim) if not no_linear else None
self.scale = scale
self.in_dim = in_dim
self.out_dim = out_dim
self.cat_input = False
def forward(self, x):
assert x.size(-1) == self.in_dim, "size must match"
sizes = x.size()
x = self.scale * x.unsqueeze(-1) @ self.emb.unsqueeze(0)
x = torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
x = x.view(*sizes[:-1], self.out_dim)
if self.linear is not None:
return self.linear(x)
return x
class NeRFPosEmbLinear(nn.Module):
def __init__(self, in_dim, out_dim, angular=False, no_linear=False, cat_input=False):
super().__init__()
        assert out_dim % (2 * in_dim) == 0, "dimension must be divisible"
L = out_dim // 2 // in_dim
emb = torch.exp(torch.arange(L, dtype=torch.float) * math.log(2.))
if not angular:
emb = emb * math.pi
self.emb = nn.Parameter(emb, requires_grad=False)
self.angular = angular
self.linear = Linear(out_dim, out_dim) if not no_linear else None
self.in_dim = in_dim
self.out_dim = out_dim
self.cat_input = cat_input
def forward(self, x):
assert x.size(-1) == self.in_dim, "size must match"
sizes = x.size()
inputs = x.clone()
if self.angular:
x = torch.acos(x.clamp(-1 + 1e-6, 1 - 1e-6))
x = x.unsqueeze(-1) @ self.emb.unsqueeze(0)
x = torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
x = x.view(*sizes[:-1], self.out_dim)
if self.linear is not None:
x = self.linear(x)
if self.cat_input:
x = torch.cat([x, inputs], -1)
return x
def extra_repr(self) -> str:
outstr = 'Sinusoidal (in={}, out={}, angular={})'.format(
self.in_dim, self.out_dim, self.angular)
if self.cat_input:
outstr = 'Cat({}, {})'.format(outstr, self.in_dim)
return outstr
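# Dimension sketch (example values, not a default configuration): NeRFPosEmbLinear(3, 60,
# no_linear=True) uses L = 60 // (2 * 3) = 10 frequency bands per input coordinate; with
# cat_input=True the raw 3-D input is appended, giving 60 + 3 = 63 output features.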
class FCLayer(nn.Module):
"""
Reference:
https://github.com/vsitzmann/pytorch_prototyping/blob/10f49b1e7df38a58fd78451eac91d7ac1a21df64/pytorch_prototyping.py
"""
def __init__(self, in_dim, out_dim, with_ln=True):
super().__init__()
self.net = [nn.Linear(in_dim, out_dim)]
if with_ln:
self.net += [nn.LayerNorm([out_dim])]
self.net += [nn.ReLU()]
self.net = nn.Sequential(*self.net)
def forward(self, x):
return self.net(x)
class FCBlock(nn.Module):
def __init__(self,
hidden_ch,
num_hidden_layers,
in_features,
out_features,
outermost_linear=False,
with_ln=True):
super().__init__()
self.net = []
self.net.append(FCLayer(in_features, hidden_ch, with_ln))
for i in range(num_hidden_layers):
self.net.append(FCLayer(hidden_ch, hidden_ch, with_ln))
if outermost_linear:
self.net.append(Linear(hidden_ch, out_features))
else:
self.net.append(FCLayer(hidden_ch, out_features, with_ln))
self.net = nn.Sequential(*self.net)
self.net.apply(self.init_weights)
def __getitem__(self, item):
return self.net[item]
def init_weights(self, m):
if type(m) == nn.Linear:
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
def forward(self, input):
return self.net(input)
class InvertableMapping(nn.Module):
def __init__(self, style='simple'):
super().__init__()
self.style = style
def f(self, x): # (0, 1) --> (0, +inf)
if self.style == 'simple':
return x / (1 - x + 1e-7)
raise NotImplementedError
def g(self, y): # (0, +inf) --> (0, 1)
if self.style == 'simple':
return y / (1 + y)
raise NotImplementedError
def dy(self, x):
if self.style == 'simple':
return 1 / ((1 - x) ** 2 + 1e-7)
raise NotImplementedError
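# Note on InvertableMapping: f maps a unit-interval sample to an unbounded value via
# x / (1 - x), g is its inverse y / (1 + y), and dy is the derivative of f, presumably
# used as the Jacobian term when depths are sampled through this change of variables.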
| 5,337 | 31.54878 | 125 |
py
|
NSVF
|
NSVF-main/fairnr/modules/implicit.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.utils import get_activation_fn
from fairnr.modules.hyper import HyperFC
from fairnr.modules.module_utils import FCLayer
class BackgroundField(nn.Module):
"""
Background (we assume a uniform color)
"""
def __init__(self, out_dim=3, bg_color="1.0,1.0,1.0", min_color=-1, stop_grad=False, background_depth=5.0):
super().__init__()
if out_dim == 3: # directly model RGB
bg_color = [float(b) for b in bg_color.split(',')] if isinstance(bg_color, str) else [bg_color]
if min_color == -1:
bg_color = [b * 2 - 1 for b in bg_color]
if len(bg_color) == 1:
bg_color = bg_color + bg_color + bg_color
bg_color = torch.tensor(bg_color)
else:
bg_color = torch.ones(out_dim).uniform_()
if min_color == -1:
bg_color = bg_color * 2 - 1
self.out_dim = out_dim
self.bg_color = nn.Parameter(bg_color, requires_grad=not stop_grad)
self.depth = background_depth
def forward(self, x, **kwargs):
return self.bg_color.unsqueeze(0).expand(
*x.size()[:-1], self.out_dim)
class ImplicitField(nn.Module):
def __init__(self, in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=False, with_ln=True, skips=None, spec_init=True):
super().__init__()
self.skips = skips
self.net = []
prev_dim = in_dim
for i in range(num_layers):
next_dim = out_dim if i == (num_layers - 1) else hidden_dim
if (i == (num_layers - 1)) and outmost_linear:
self.net.append(nn.Linear(prev_dim, next_dim))
else:
self.net.append(FCLayer(prev_dim, next_dim, with_ln=with_ln))
prev_dim = next_dim
if (self.skips is not None) and (i in self.skips) and (i != (num_layers - 1)):
prev_dim += in_dim
if num_layers > 0:
self.net = nn.ModuleList(self.net)
if spec_init:
self.net.apply(self.init_weights)
def forward(self, x):
y = self.net[0](x)
for i in range(len(self.net) - 1):
if (self.skips is not None) and (i in self.skips):
y = torch.cat((x, y), dim=-1)
y = self.net[i+1](y)
return y
def init_weights(self, m):
if type(m) == nn.Linear:
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
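# Usage sketch (illustrative sizes, not the exact defaults of any registered architecture):
# an 8-layer field with a skip connection after layer 3, NeRF-style.
#
#   >>> field = ImplicitField(in_dim=63, out_dim=256, hidden_dim=256, num_layers=8, skips=[3])
#   >>> feat = field(torch.randn(1024, 63))    # -> (1024, 256)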
class HyperImplicitField(nn.Module):
def __init__(self, hyper_in_dim, in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=False):
super().__init__()
self.hyper_in_dim = hyper_in_dim
self.in_dim = in_dim
self.net = HyperFC(
hyper_in_dim,
1, 256,
hidden_dim,
num_layers,
in_dim,
out_dim,
outermost_linear=outmost_linear
)
def forward(self, x, c):
assert (x.size(-1) == self.in_dim) and (c.size(-1) == self.hyper_in_dim)
        if getattr(self, "nerfpos", None) is not None:  # nerfpos is not set in __init__; guard against AttributeError
x = torch.cat([x, self.nerfpos(x)], -1)
return self.net(c)(x.unsqueeze(0)).squeeze(0)
class SignedDistanceField(ImplicitField):
"""
Predictor for density or SDF values.
"""
def __init__(self, in_dim, hidden_dim, num_layers=1,
recurrent=False, with_ln=True, spec_init=True):
super().__init__(in_dim, in_dim, in_dim, num_layers-1, with_ln=with_ln, spec_init=spec_init)
self.recurrent = recurrent
if recurrent:
assert num_layers > 1
self.hidden_layer = nn.LSTMCell(input_size=in_dim, hidden_size=hidden_dim)
self.hidden_layer.apply(init_recurrent_weights)
lstm_forget_gate_init(self.hidden_layer)
else:
self.hidden_layer = FCLayer(in_dim, hidden_dim, with_ln) \
if num_layers > 0 else nn.Identity()
prev_dim = hidden_dim if num_layers > 0 else in_dim
self.output_layer = nn.Linear(prev_dim, 1)
def forward(self, x, state=None):
if self.recurrent:
shape = x.size()
state = self.hidden_layer(x.view(-1, shape[-1]), state)
if state[0].requires_grad:
state[0].register_hook(lambda x: x.clamp(min=-5, max=5))
return self.output_layer(state[0].view(*shape[:-1], -1)).squeeze(-1), state
else:
return self.output_layer(self.hidden_layer(x)).squeeze(-1), None
class TextureField(ImplicitField):
"""
Pixel generator based on 1x1 conv networks
"""
def __init__(self, in_dim, hidden_dim, num_layers,
with_alpha=False, with_ln=True, spec_init=True):
out_dim = 3 if not with_alpha else 4
super().__init__(in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=True, with_ln=with_ln, spec_init=spec_init)
# ------------------ #
# helper functions #
# ------------------ #
def init_recurrent_weights(self):
for m in self.modules():
if type(m) in [nn.GRU, nn.LSTM, nn.RNN]:
for name, param in m.named_parameters():
if 'weight_ih' in name:
nn.init.kaiming_normal_(param.data)
elif 'weight_hh' in name:
nn.init.orthogonal_(param.data)
elif 'bias' in name:
param.data.fill_(0)
def lstm_forget_gate_init(lstm_layer):
for name, parameter in lstm_layer.named_parameters():
if not "bias" in name: continue
n = parameter.size(0)
start, end = n // 4, n // 2
parameter.data[start:end].fill_(1.)
def clip_grad_norm_hook(x, max_norm=10):
total_norm = x.norm()
total_norm = total_norm ** (1 / 2.)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
return x * clip_coef
| 6,163 | 34.837209 | 111 |
py
|
NSVF
|
NSVF-main/fairnr/criterions/rendering_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn.functional as F
import torch
from torch import Tensor
from fairseq import metrics
from fairseq.utils import item
from fairseq.criterions import FairseqCriterion, register_criterion
import fairnr.criterions.utils as utils
class RenderingCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(task)
self.args = args
self.hierarchical = getattr(args, 'hierarchical_loss', False)
@classmethod
def build_criterion(cls, args, task):
"""Construct a criterion from command-line args."""
return cls(args, task)
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument('--hierarchical-loss', action='store_true',
help='if set, it computes both the coarse and fine-level losses in hierarchical sampling.')
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample)
sample.update(net_output['samples'])
loss, loss_output = self.compute_loss(model, net_output, sample, reduce=reduce)
if self.hierarchical:
assert net_output.get('coarse', None) is not None, "missing coarse level outputs."
loss0, loss_output0 = self.compute_loss(model, net_output['coarse'], sample, reduce=reduce)
loss = loss + loss0
loss_output.update({'cor-' + key: loss_output0[key] for key in loss_output0})
sample_size = 1
logging_output = {
'loss': loss.data.item() if reduce else loss.data,
'nsentences': sample['alpha'].size(0),
'ntokens': sample['alpha'].size(1),
'npixels': sample['alpha'].size(2),
'sample_size': sample_size,
}
for w in loss_output:
logging_output[w] = loss_output[w]
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
raise NotImplementedError
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
summed_logging_outputs = {
w: sum(log.get(w, 0) for log in logging_outputs)
for w in logging_outputs[0]
}
sample_size = summed_logging_outputs['sample_size']
for w in summed_logging_outputs:
if '_loss' in w:
metrics.log_scalar(w.split('_')[0], summed_logging_outputs[w] / sample_size, sample_size, round=3)
elif '_weight' in w:
metrics.log_scalar('w_' + w[:3], summed_logging_outputs[w] / sample_size, sample_size, round=3)
elif '_acc' in w:
metrics.log_scalar('a_' + w[:3], summed_logging_outputs[w] / sample_size, sample_size, round=3)
elif w == 'loss':
metrics.log_scalar('loss', summed_logging_outputs['loss'] / sample_size, sample_size, priority=0, round=3)
elif '_log' in w:
metrics.log_scalar(w[:3], summed_logging_outputs[w] / sample_size, sample_size, priority=1, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
@register_criterion('srn_loss')
class SRNLossCriterion(RenderingCriterion):
def __init__(self, args, task):
super().__init__(args, task)
# HACK: to avoid warnings in c10d
self.dummy_loss = torch.nn.Parameter(torch.tensor(0.0, dtype=torch.float32), requires_grad=True)
if args.vgg_weight > 0:
from fairnr.criterions.perceptual_loss import VGGPerceptualLoss
self.vgg = VGGPerceptualLoss(resize=False)
if args.eval_lpips:
from lpips_pytorch import LPIPS
self.lpips = LPIPS(net_type='alex', version='0.1')
@staticmethod
def add_args(parser):
RenderingCriterion.add_args(parser)
parser.add_argument('--L1', action='store_true',
help='if enabled, use L1 instead of L2 for RGB loss')
parser.add_argument('--color-weight', type=float, default=256.0)
parser.add_argument('--depth-weight', type=float, default=0.0)
parser.add_argument('--depth-weight-decay', type=str, default=None,
help="""if set, use tuple to set (final_ratio, steps).
For instance, (0, 30000)
""")
parser.add_argument('--alpha-weight', type=float, default=0.0)
parser.add_argument('--vgg-weight', type=float, default=0.0)
parser.add_argument('--eikonal-weight', type=float, default=0.0)
parser.add_argument('--regz-weight', type=float, default=0.0)
parser.add_argument('--vgg-level', type=int, choices=[1,2,3,4], default=2)
parser.add_argument('--eval-lpips', action='store_true',
help="evaluate LPIPS scores in validation")
parser.add_argument('--no-background-loss', action='store_true')
def compute_loss(self, model, net_output, sample, reduce=True):
losses, other_logs = {}, {}
# prepare data before computing loss
sampled_uv = sample['sampled_uv'] # S, V, 2, N, P, P (patch-size)
S, V, _, N, P1, P2 = sampled_uv.size()
H, W, h, w = sample['size'][0, 0].long().cpu().tolist()
L = N * P1 * P2
flatten_uv = sampled_uv.view(S, V, 2, L)
flatten_index = (flatten_uv[:,:,0] // h + flatten_uv[:,:,1] // w * W).long()
assert 'colors' in sample and sample['colors'] is not None, "ground-truth colors not provided"
target_colors = sample['colors']
masks = (sample['alpha'] > 0) if self.args.no_background_loss else None
if L < target_colors.size(2):
target_colors = target_colors.gather(2, flatten_index.unsqueeze(-1).repeat(1,1,1,3))
masks = masks.gather(2, flatten_uv) if masks is not None else None
if 'other_logs' in net_output:
other_logs.update(net_output['other_logs'])
# computing loss
if self.args.color_weight > 0:
color_loss = utils.rgb_loss(
net_output['colors'], target_colors,
masks, self.args.L1)
losses['color_loss'] = (color_loss, self.args.color_weight)
if self.args.alpha_weight > 0:
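            # log1p(alpha * (1 - alpha) / 0.11) is smallest when alpha is near 0 or 1,
            # pushing each ray towards being either fully hit or fully missed.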
_alpha = net_output['missed'].reshape(-1)
alpha_loss = torch.log1p(
1. / 0.11 * _alpha.float() * (1 - _alpha.float())
).mean().type_as(_alpha)
losses['alpha_loss'] = (alpha_loss, self.args.alpha_weight)
if self.args.depth_weight > 0:
if sample['depths'] is not None:
                target_depths = sample['depths'].gather(2, flatten_index)
depth_mask = masks & (target_depths > 0)
depth_loss = utils.depth_loss(net_output['depths'], target_depths, depth_mask)
else:
# no depth map is provided, depth loss only applied on background based on masks
max_depth_target = self.args.max_depth * torch.ones_like(net_output['depths'])
if sample['mask'] is not None:
depth_loss = utils.depth_loss(net_output['depths'], max_depth_target, (1 - sample['mask']).bool())
else:
depth_loss = utils.depth_loss(net_output['depths'], max_depth_target, ~masks)
depth_weight = self.args.depth_weight
if self.args.depth_weight_decay is not None:
final_factor, final_steps = eval(self.args.depth_weight_decay)
depth_weight *= max(0, 1 - (1 - final_factor) * self.task._num_updates / final_steps)
other_logs['depth_weight'] = depth_weight
losses['depth_loss'] = (depth_loss, depth_weight)
if self.args.vgg_weight > 0:
assert P1 * P2 > 1, "we have to use a patch-based sampling for VGG loss"
target_colors = target_colors.reshape(-1, P1, P2, 3).permute(0, 3, 1, 2) * .5 + .5
output_colors = net_output['colors'].reshape(-1, P1, P2, 3).permute(0, 3, 1, 2) * .5 + .5
vgg_loss = self.vgg(output_colors, target_colors)
losses['vgg_loss'] = (vgg_loss, self.args.vgg_weight)
if self.args.eikonal_weight > 0:
losses['eik_loss'] = (net_output['eikonal-term'].mean(), self.args.eikonal_weight)
# if self.args.regz_weight > 0:
losses['reg_loss'] = (net_output['regz-term'].mean(), self.args.regz_weight)
loss = sum(losses[key][0] * losses[key][1] for key in losses)
# add a dummy loss
loss = loss + model.dummy_loss + self.dummy_loss * 0.
logging_outputs = {key: item(losses[key][0]) for key in losses}
logging_outputs.update(other_logs)
return loss, logging_outputs
| 9,677 | 43.805556 | 122 |
py
|
NSVF
|
NSVF-main/fairnr/criterions/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
TINY = 1e-7
def rgb_loss(predicts, rgbs, masks=None, L1=False, sum=False):
if masks is not None:
if masks.sum() == 0:
return predicts.new_zeros(1).mean()
predicts = predicts[masks]
rgbs = rgbs[masks]
if L1:
loss = torch.abs(predicts - rgbs).sum(-1)
else:
loss = ((predicts - rgbs) ** 2).sum(-1)
return loss.mean() if not sum else loss.sum()
def depth_loss(depths, depth_gt, masks=None, sum=False):
if masks is not None:
if masks.sum() == 0:
return depths.new_zeros(1).mean()
depth_gt = depth_gt[masks]
depths = depths[masks]
    loss = (depths - depth_gt) ** 2
return loss.mean() if not sum else loss.sum()
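# Worked example (sketch): the squared error is summed over the RGB channels and then
# averaged over pixels, so an all-zero prediction against an all-one target gives 3.0.
#
#   >>> rgb_loss(torch.zeros(2, 3), torch.ones(2, 3))    # -> tensor(3.)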
| 971 | 26 | 65 |
py
|
NSVF
|
NSVF-main/fairnr/criterions/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"fairnr.criterions." + criterion_name
)
| 458 | 29.6 | 65 |
py
|
NSVF
|
NSVF-main/fairnr/criterions/perceptual_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchvision
class VGGPerceptualLoss(torch.nn.Module):
def __init__(self, resize=False):
super(VGGPerceptualLoss, self).__init__()
blocks = []
blocks.append(torchvision.models.vgg16(pretrained=True).features[:4].eval())
blocks.append(torchvision.models.vgg16(pretrained=True).features[4:9].eval())
blocks.append(torchvision.models.vgg16(pretrained=True).features[9:16].eval())
blocks.append(torchvision.models.vgg16(pretrained=True).features[16:23].eval())
self.blocks = torch.nn.ModuleList(blocks)
self.transform = torch.nn.functional.interpolate
self.mean = torch.nn.Parameter(torch.tensor([0.485, 0.456, 0.406]).view(1,3,1,1))
self.std = torch.nn.Parameter(torch.tensor([0.229, 0.224, 0.225]).view(1,3,1,1))
self.resize = resize
# NO GRADIENT!
for param in self.parameters():
param.requires_grad = False
def forward(self, input, target, level=2):
# print(input.device, input.dtype, self.mean.device, self.mean.dtype, self.std, self.std.dtype)
if input.shape[1] != 3:
input = input.repeat(1, 3, 1, 1)
target = target.repeat(1, 3, 1, 1)
input = (input-self.mean) / self.std
target = (target-self.mean) / self.std
if self.resize:
input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
loss = 0.0
x = input
y = target
for i, block in enumerate(self.blocks):
x = block(x)
y = block(y)
if i < level:
loss += torch.nn.functional.mse_loss(x, y)
else:
break
return loss
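# Usage sketch (assumes NCHW inputs already mapped into [0, 1], as done by the caller in
# rendering_loss.py):
#
#   >>> criterion = VGGPerceptualLoss(resize=False)
#   >>> loss = criterion(pred_patches, gt_patches, level=2)   # relu1_2 + relu2_2 features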
| 2,023 | 39.48 | 103 |
py
|
NSVF
|
NSVF-main/fairnr/models/nsvf_bg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import cv2, math, time, copy, json
import numpy as np
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
register_model,
register_model_architecture
)
from fairseq.utils import item, with_torch_seed
from fairnr.data.geometry import compute_normal_map, fill_in
from fairnr.models.nsvf import NSVFModel, base_architecture, nerf_style_architecture
from fairnr.models.fairnr_model import get_encoder, get_field, get_reader, get_renderer
@register_model('nsvf_bg')
class NSVFBGModel(NSVFModel):
def __init__(self, args, setups):
super().__init__(args, setups)
args_copy = copy.deepcopy(args)
if getattr(args, "bg_field_args", None) is not None:
args_copy.__dict__.update(json.loads(args.bg_field_args))
else:
args_copy.inputs_to_density = "pos:10"
args_copy.inputs_to_texture = "feat:0:256, ray:4:3:b"
self.bg_field = get_field("radiance_field")(args_copy)
self.bg_encoder = get_encoder("volume_encoder")(args_copy)
@classmethod
def add_args(cls, parser):
super().add_args(parser)
parser.add_argument('--near', type=float, help='near distance of the volume')
parser.add_argument('--far', type=float, help='far distance of the volume')
parser.add_argument('--nerf-steps', type=int, help='additional nerf steps')
parser.add_argument('--bg-field-args', type=str, default=None, help='override args for bg field')
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
# we will trace the background field here
S, V, P = sizes
fullsize = S * V * P
vox_colors = fill_in((fullsize, 3), hits, all_results['colors'], 0.0)
vox_missed = fill_in((fullsize, ), hits, all_results['missed'], 1.0)
vox_depths = fill_in((fullsize, ), hits, all_results['depths'], 0.0)
mid_dis = (self.args.near + self.args.far) / 2
n_depth = fill_in((fullsize, ), hits, all_results['min_depths'], mid_dis)[:, None]
f_depth = fill_in((fullsize, ), hits, all_results['max_depths'], mid_dis)[:, None]
# front field
nerf_step = getattr(self.args, "nerf_steps", 64)
max_depth = n_depth
min_depth = torch.ones_like(max_depth) * self.args.near
intersection_outputs = {
"min_depth": min_depth, "max_depth": max_depth,
"probs": torch.ones_like(max_depth),
"steps": torch.ones_like(max_depth).squeeze(-1) * nerf_step,
"intersected_voxel_idx": torch.zeros_like(min_depth).int()}
with with_torch_seed(self.unique_seed):
fg_samples = self.bg_encoder.ray_sample(intersection_outputs)
fg_results = self.raymarcher(
self.bg_encoder, self.bg_field, ray_start, ray_dir, fg_samples, {})
# back field
min_depth = f_depth
max_depth = torch.ones_like(min_depth) * self.args.far
intersection_outputs = {
"min_depth": min_depth, "max_depth": max_depth,
"probs": torch.ones_like(max_depth),
"steps": torch.ones_like(max_depth).squeeze(-1) * nerf_step,
"intersected_voxel_idx": torch.zeros_like(min_depth).int()}
with with_torch_seed(self.unique_seed):
bg_samples = self.bg_encoder.ray_sample(intersection_outputs)
bg_results = self.raymarcher(
self.bg_encoder, self.bg_field, ray_start, ray_dir, bg_samples, {})
# merge background to foreground
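        # compositing order along each ray: front NeRF field, then the sparse-voxel field,
        # then the back NeRF field; every later term is weighted by the accumulated
        # 'missed' probability of everything in front of it.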
all_results['voxcolors'] = vox_colors.view(S, V, P, 3)
all_results['colors'] = fg_results['colors'] + fg_results['missed'][:, None] * (vox_colors + vox_missed[:, None] * bg_results['colors'])
all_results['depths'] = fg_results['depths'] + fg_results['missed'] * (vox_depths + vox_missed * bg_results['depths'])
all_results['missed'] = fg_results['missed'] * vox_missed * bg_results['missed']
# apply the NSVF post-processing
return super().postprocessing(ray_start, ray_dir, all_results, hits, sizes)
def _visualize(self, images, sample, output, state, **kwargs):
img_id, shape, view, width, name = state
images = super()._visualize(images, sample, output, state, **kwargs)
if 'voxcolors' in output and output['voxcolors'] is not None:
images['{}_vcolors/{}:HWC'.format(name, img_id)] ={
'img': output['voxcolors'][shape, view],
'min_val': float(self.args.min_color)
}
return images
@register_model_architecture("nsvf_bg", "nsvf_bg")
def base_bg_architecture(args):
base_architecture(args)
@register_model_architecture("nsvf_bg", "nsvf_bg_xyz")
def base_bg2_architecture(args):
args.nerf_steps = getattr(args, "nerf_steps", 64)
nerf_style_architecture(args)
@register_model('shared_nsvf_bg')
class SharedNSVFBGModel(NSVFBGModel):
ENCODER = 'shared_sparsevoxel_encoder'
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
# we will trace the background field here
# pass context vector from NSVF to NeRF
self.bg_encoder.precompute(context=self.encoder.contexts(self.encoder.cid).unsqueeze(0))
return super().postprocessing(ray_start, ray_dir, all_results, hits, sizes)
@torch.no_grad()
def split_voxels(self):
logger.info("half the global voxel size {:.4f} -> {:.4f}".format(
self.encoder.all_voxels[0].voxel_size.item(),
self.encoder.all_voxels[0].voxel_size.item() * .5))
self.encoder.splitting()
for id in range(len(self.encoder.all_voxels)):
self.encoder.all_voxels[id].voxel_size *= .5
self.encoder.all_voxels[id].max_hits *= 1.5
self.clean_caches()
@torch.no_grad()
def reduce_stepsize(self):
logger.info("reduce the raymarching step size {:.4f} -> {:.4f}".format(
self.encoder.all_voxels[0].step_size.item(),
self.encoder.all_voxels[0].step_size.item() * .5))
for id in range(len(self.encoder.all_voxels)):
self.encoder.all_voxels[id].step_size *= .5
@register_model_architecture("shared_nsvf_bg", "shared_nsvf_bg_xyz")
def base_shared_architecture(args):
args.context_embed_dim = getattr(args, "context_embed_dim", 96)
args.hypernetwork = getattr(args, "hypernetwork", False)
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10, context:0:96")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4:3:b")
args.bg_field_args = getattr(args, "bg_field_args",
"{'inputs_to_density': 'pos:10, context:0:96', 'inputs_to_texture': 'feat:0:256, ray:4:3:b}'}")
nerf_style_architecture(args)
| 7,079 | 43.810127 | 144 |
py
|
NSVF
|
NSVF-main/fairnr/models/multi_nsvf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import torch
from fairseq.models import (
register_model,
register_model_architecture
)
from fairnr.models.nsvf import NSVFModel, base_architecture
@register_model('multi_nsvf')
class MultiNSVFModel(NSVFModel):
ENCODER = 'multi_sparsevoxel_encoder'
@torch.no_grad()
def split_voxels(self):
logger.info("half the global voxel size {:.4f} -> {:.4f}".format(
self.encoder.all_voxels[0].voxel_size.item(),
self.encoder.all_voxels[0].voxel_size.item() * .5))
self.encoder.splitting()
for id in range(len(self.encoder.all_voxels)):
self.encoder.all_voxels[id].voxel_size *= .5
self.encoder.all_voxels[id].max_hits *= 1.5
@torch.no_grad()
def reduce_stepsize(self):
logger.info("reduce the raymarching step size {:.4f} -> {:.4f}".format(
self.encoder.all_voxels[0].step_size.item(),
self.encoder.all_voxels[0].step_size.item() * .5))
for id in range(len(self.encoder.all_voxels)):
self.encoder.all_voxels[id].step_size *= .5
@register_model("shared_nsvf")
class SharedNSVFModel(MultiNSVFModel):
ENCODER = 'shared_sparsevoxel_encoder'
@register_model_architecture('multi_nsvf', "multi_nsvf_base")
def multi_base_architecture(args):
base_architecture(args)
@register_model_architecture('shared_nsvf', 'shared_nsvf')
def shared_base_architecture(args):
# encoder
args.context_embed_dim = getattr(args, "context_embed_dim", 96)
# field
args.inputs_to_density = getattr(args, "inputs_to_density", "emb:6:32, context:0:96")
args.hypernetwork = getattr(args, "hypernetwork", False)
base_architecture(args)
| 1,938 | 30.786885 | 89 |
py
|
NSVF
|
NSVF-main/fairnr/models/nerf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import cv2, math, time
import numpy as np
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
register_model,
register_model_architecture
)
from fairseq.utils import with_torch_seed
from fairnr.models.fairnr_model import BaseModel
@register_model('nerf')
class NeRFModel(BaseModel):
""" This is a simple re-implementation of the vanilla NeRF
"""
ENCODER = 'volume_encoder'
READER = 'image_reader'
FIELD = 'radiance_field'
RAYMARCHER = 'volume_rendering'
@classmethod
def add_args(cls, parser):
super().add_args(parser)
parser.add_argument('--fixed-num-samples', type=int,
help='number of samples for the first pass along the ray.')
parser.add_argument('--fixed-fine-num-samples', type=int,
help='sample a fixed number of points for each ray in hierarchical sampling, e.g. 64, 128.')
parser.add_argument('--reduce-fine-for-missed', action='store_true',
help='if set, the number of fine samples is discounted based on foreground probability only.')
def preprocessing(self, **kwargs):
return self.encoder.precompute(**kwargs)
def intersecting(self, ray_start, ray_dir, encoder_states, **kwargs):
ray_start, ray_dir, intersection_outputs, hits = \
self.encoder.ray_intersect(ray_start, ray_dir, encoder_states)
return ray_start, ray_dir, intersection_outputs, hits, None
def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, fine=False):
# sample points and use middle point approximation
with with_torch_seed(self.unique_seed): # make sure each GPU sample differently.
samples = self.encoder.ray_sample(intersection_outputs)
field = self.field_fine if fine and (self.field_fine is not None) else self.field
all_results = self.raymarcher(
self.encoder, field, ray_start, ray_dir, samples, encoder_states
)
return samples, all_results
def prepare_hierarchical_sampling(self, intersection_outputs, samples, all_results):
# this function is basically the same as that in NSVF model.
depth = samples.get('original_point_depth', samples['sampled_point_depth'])
dists = samples.get('original_point_distance', samples['sampled_point_distance'])
intersection_outputs['min_depth'] = depth - dists * .5
intersection_outputs['max_depth'] = depth + dists * .5
intersection_outputs['intersected_voxel_idx'] = samples['sampled_point_voxel_idx'].contiguous()
# safe_probs = all_results['probs'] + 1e-8 # HACK: make a non-zero distribution
safe_probs = all_results['probs'] + 1e-5 # NeRF used 1e-5, will this make a change?
intersection_outputs['probs'] = safe_probs / safe_probs.sum(-1, keepdim=True)
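        # the normalized coarse weights act as a distribution over the coarse bins, so
        # ray_sample can redistribute the fine samples via inverse-CDF sampling.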
intersection_outputs['steps'] = safe_probs.new_ones(*safe_probs.size()[:-1])
if getattr(self.args, "fixed_fine_num_samples", 0) > 0:
intersection_outputs['steps'] = intersection_outputs['steps'] * self.args.fixed_fine_num_samples
if getattr(self.args, "reduce_fine_for_missed", False):
intersection_outputs['steps'] = intersection_outputs['steps'] * safe_probs.sum(-1)
return intersection_outputs
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
# vanilla nerf hits everything. so no need to fill_in
S, V, P = sizes
fullsize = S * V * P
all_results['missed'] = all_results['missed'].view(S, V, P)
all_results['colors'] = all_results['colors'].view(S, V, P, 3)
all_results['depths'] = all_results['depths'].view(S, V, P)
if 'z' in all_results:
all_results['z'] = all_results['z'].view(S, V, P)
BG_DEPTH = self.field.bg_color.depth
bg_color = self.field.bg_color(all_results['colors'])
all_results['colors'] += all_results['missed'].unsqueeze(-1) * bg_color.reshape(fullsize, 3).view(S, V, P, 3)
all_results['depths'] += all_results['missed'] * BG_DEPTH
if 'normal' in all_results:
all_results['normal'] = all_results['normal'].view(S, V, P, 3)
return all_results
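    # Hedged numeric example of the compositing above (added comment): 'missed' is the
    # probability that a ray terminates in empty space, so each pixel is alpha-blended
    # against a constant background.  With missed = 0.3, accumulated color 0.6 and a white
    # background (1.0):  color = 0.6 + 0.3 * 1.0 = 0.9,  depth = depth + 0.3 * BG_DEPTH.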
def add_other_logs(self, all_results):
return {}
@register_model_architecture("nerf", "nerf_base")
def base_architecture(args):
    # parameters that typically need to be adjusted per scene
args.near = getattr(args, "near", 2)
args.far = getattr(args, "far", 4)
args.fixed_num_samples = getattr(args, "fixed_num_samples", 64)
args.fixed_fine_num_samples = getattr(args, "fixed_fine_num_samples", 128)
args.hierarchical_sampling = getattr(args, "hierarchical_sampling", True)
args.use_fine_model = getattr(args, "use_fine_model", True)
# field
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
args.feature_embed_dim = getattr(args, "feature_embed_dim", 256)
args.density_embed_dim = getattr(args, "density_embed_dim", 128)
args.texture_embed_dim = getattr(args, "texture_embed_dim", 256)
# API Update: fix the number of layers
args.feature_layers = getattr(args, "feature_layers", 1)
args.texture_layers = getattr(args, "texture_layers", 3)
args.background_stop_gradient = getattr(args, "background_stop_gradient", False)
args.background_depth = getattr(args, "background_depth", 5.0)
# raymarcher
args.discrete_regularization = getattr(args, "discrete_regularization", False)
args.deterministic_step = getattr(args, "deterministic_step", False)
args.raymarching_tolerance = getattr(args, "raymarching_tolerance", 0)
# reader
args.pixel_per_view = getattr(args, "pixel_per_view", 2048)
args.sampling_on_mask = getattr(args, "sampling_on_mask", 0.0)
args.sampling_at_center = getattr(args, "sampling_at_center", 1.0)
args.sampling_on_bbox = getattr(args, "sampling_on_bbox", False)
args.sampling_patch_size = getattr(args, "sampling_patch_size", 1)
args.sampling_skipping_size = getattr(args, "sampling_skipping_size", 1)
# others
args.chunk_size = getattr(args, "chunk_size", 64)
args.valid_chunk_size = getattr(args, "valid_chunk_size", 64)
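# Hedged reading of the "inputs_to_density" / "inputs_to_texture" strings above (added
# comment, not authoritative): each comma-separated entry appears to follow
# "name:<pe-frequencies>[:<dim>[:b]]", e.g. "pos:10" = 3D position with 10 positional
# encoding frequencies, "feat:0:256" = a 256-dim feature passed through unencoded,
# "ray:4" = viewing direction with 4 frequencies.  The exact parsing lives in
# fairnr/modules/field.py; treat this note only as a mnemonic.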
@register_model_architecture("nerf", "nerf_deep")
def nerf_deep_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
base_architecture(args)
@register_model_architecture("nerf", "nerf_nerf")
def nerf_nerf_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.texture_layers = getattr(args, "texture_layers", 0)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
base_architecture(args)
@register_model_architecture("nerf", "nerf_xyzn_nope")
def nerf2_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:0:3, normal:0:3, sigma:0:1, ray:4")
base_architecture(args)
@register_model('sdf_nerf')
class SDFNeRFModel(NeRFModel):
FIELD = "sdf_radiance_field"
@register_model_architecture("sdf_nerf", "sdf_nerf")
def sdf_nsvf_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
nerf2_architecture(args)
@register_model('sg_nerf')
class SGNeRFModel(NeRFModel):
""" This is a simple re-implementation of the vanilla NeRF
"""
ENCODER = 'infinite_volume_encoder'
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
        # vanilla NeRF hits everything, so there is no need to call fill_in
S, V, P = sizes
all_results['missed'] = all_results['missed'].view(S, V, P)
all_results['colors'] = all_results['colors'].view(S, V, P, 3)
all_results['depths'] = all_results['depths'].view(S, V, P)
if 'z' in all_results:
all_results['z'] = all_results['z'].view(S, V, P)
if 'normal' in all_results:
all_results['normal'] = all_results['normal'].view(S, V, P, 3)
return all_results
@register_model_architecture("sg_nerf", "sg_nerf_base")
def sg_nerf_architecture(args):
INF_FAR = 1e6
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10:4")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4:3:b")
args.near = getattr(args, "near", 2)
args.far = getattr(args, "far", INF_FAR)
base_architecture(args)
@register_model_architecture("sg_nerf", "sg_nerf_new")
def sg_nerf2_architecture(args):
args.nerf_style_mlp = getattr(args, "nerf_style_mlp", True)
args.texture_embed_dim = getattr(args, "texture_embed_dim", 128)
sg_nerf_architecture(args)
| 9,380 | 43.25 | 117 |
py
|
NSVF
|
NSVF-main/fairnr/models/fairnr_model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Base classes for various models.
The basic principle of differentiable rendering is two components:
-- an field or so-called geometric field (GE)
-- an raymarcher or so-called differentiable ray-marcher (RM)
So it can be composed as a GERM model
"""
import logging
import torch
import torch.nn as nn
import skimage.metrics
import imageio, os
import numpy as np
import copy
from collections import defaultdict
from fairseq.models import BaseFairseqModel
from fairseq.utils import with_torch_seed
from fairnr.modules.encoder import get_encoder
from fairnr.modules.field import get_field
from fairnr.modules.renderer import get_renderer
from fairnr.modules.reader import get_reader
from fairnr.data.geometry import ray, compute_normal_map
from fairnr.data.data_utils import recover_image
logger = logging.getLogger(__name__)
class BaseModel(BaseFairseqModel):
"""Base class"""
ENCODER = 'abstract_encoder'
FIELD = 'abstract_field'
RAYMARCHER = 'abstract_renderer'
READER = 'abstract_reader'
def __init__(self, args, setups):
super().__init__()
self.args = args
self.hierarchical = getattr(self.args, "hierarchical_sampling", False)
self.reader = setups['reader']
self.encoder = setups['encoder']
self.field = setups['field']
self.raymarcher = setups['raymarcher']
self.cache = None
self._num_updates = 0
if getattr(self.args, "use_fine_model", False):
self.field_fine = copy.deepcopy(self.field)
else:
self.field_fine = None
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
reader = get_reader(cls.READER)(args)
encoder = get_encoder(cls.ENCODER)(args)
field = get_field(cls.FIELD)(args)
raymarcher = get_renderer(cls.RAYMARCHER)(args)
setups = {
'reader': reader,
'encoder': encoder,
'field': field,
'raymarcher': raymarcher
}
return cls(args, setups)
@classmethod
def add_args(cls, parser):
get_reader(cls.READER).add_args(parser)
get_renderer(cls.RAYMARCHER).add_args(parser)
get_encoder(cls.ENCODER).add_args(parser)
get_field(cls.FIELD).add_args(parser)
# model-level args
parser.add_argument('--hierarchical-sampling', action='store_true',
help='if set, a second ray marching pass will be performed based on the first time probs.')
parser.add_argument('--use-fine-model', action='store_true',
help='if set, we will simultaneously optimize two networks, a coarse field and a fine field.')
def set_num_updates(self, num_updates):
self._num_updates = num_updates
super().set_num_updates(num_updates)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
if (self.field_fine is None) and \
("field_fine" in [key.split('.')[0] for key in state_dict.keys()]):
            # the loaded checkpoint contains a fine-field network; copy its weights into the field network
for fine_key in [key for key in state_dict.keys() if "field_fine" in key]:
state_dict[fine_key.replace("field_fine", "field")] = state_dict[fine_key]
del state_dict[fine_key]
@property
def dummy_loss(self):
return sum([p.sum() for p in self.parameters()]) * 0.0
def forward(self, ray_split=1, **kwargs):
        with with_torch_seed(self.unique_seed):   # make sure different GPUs sample different rays
ray_start, ray_dir, uv = self.reader(**kwargs)
kwargs.update({
'field_fn': self.field.forward,
'input_fn': self.encoder.forward})
if ray_split == 1:
results = self._forward(ray_start, ray_dir, **kwargs)
else:
total_rays = ray_dir.shape[2]
chunk_size = total_rays // ray_split
results = [
self._forward(
ray_start, ray_dir[:, :, i: i+chunk_size], **kwargs)
for i in range(0, total_rays, chunk_size)
]
results = self.merge_outputs(results)
results['samples'] = {
'sampled_uv': results.get('sampled_uv', uv),
'ray_start': ray_start,
'ray_dir': ray_dir
}
# caching the prediction
self.cache = {
w: results[w].detach()
                if isinstance(results[w], torch.Tensor)
else results[w]
for w in results
}
return results
def _forward(self, ray_start, ray_dir, **kwargs):
S, V, P, _ = ray_dir.size()
        assert S == 1, "we only support a single object for now."
encoder_states = self.preprocessing(**kwargs)
ray_start, ray_dir, intersection_outputs, hits, sampled_uv = \
self.intersecting(ray_start, ray_dir, encoder_states, **kwargs)
# save the original rays
ray_start0 = ray_start.reshape(-1, 3).clone()
ray_dir0 = ray_dir.reshape(-1, 3).clone()
P = ray_dir.size(1) // V
all_results = defaultdict(lambda: None)
if hits.sum() > 0:
intersection_outputs = {
name: outs[hits] for name, outs in intersection_outputs.items()}
ray_start, ray_dir = ray_start[hits], ray_dir[hits]
encoder_states = {name: s.reshape(-1, s.size(-1)) if s is not None else None
for name, s in encoder_states.items()}
samples, all_results = self.raymarching( # ray-marching
ray_start, ray_dir, intersection_outputs, encoder_states)
if self.hierarchical: # hierarchical sampling
intersection_outputs = self.prepare_hierarchical_sampling(
intersection_outputs, samples, all_results)
coarse_results = all_results.copy()
samples, all_results = self.raymarching(
ray_start, ray_dir, intersection_outputs, encoder_states, fine=True)
all_results['coarse'] = coarse_results
hits = hits.reshape(-1)
all_results = self.postprocessing(ray_start0, ray_dir0, all_results, hits, (S, V, P))
if self.hierarchical:
all_results['coarse'] = self.postprocessing(
ray_start, ray_dir, all_results['coarse'], hits, (S, V, P))
if sampled_uv is not None:
all_results['sampled_uv'] = sampled_uv
all_results['other_logs'] = self.add_other_logs(all_results)
return all_results
def preprocessing(self, **kwargs):
raise NotImplementedError
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
raise NotImplementedError
def intersecting(self, ray_start, ray_dir, encoder_states):
raise NotImplementedError
def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, fine=False):
raise NotImplementedError
def prepare_hierarchical_sampling(self, intersection_outputs, samples, all_results):
raise NotImplementedError
def add_other_logs(self, all_results):
raise NotImplementedError
def merge_outputs(self, outputs):
new_output = {}
for key in outputs[0]:
if isinstance(outputs[0][key], torch.Tensor) and outputs[0][key].dim() > 2:
new_output[key] = torch.cat([o[key] for o in outputs], 2)
else:
new_output[key] = outputs[0][key]
return new_output
@torch.no_grad()
def visualize(self, sample, output=None, shape=0, view=0, **kwargs):
width = int(sample['size'][shape, view][1].item())
img_id = '{}_{}'.format(sample['shape'][shape], sample['view'][shape, view])
if output is None:
assert self.cache is not None, "need to run forward-pass"
output = self.cache # make sure to run forward-pass.
sample.update(output['samples'])
images = {}
images = self._visualize(images, sample, output, [img_id, shape, view, width, 'render'])
images = self._visualize(images, sample, sample, [img_id, shape, view, width, 'target'])
if 'coarse' in output: # hierarchical sampling
images = self._visualize(images, sample, output['coarse'], [img_id, shape, view, width, 'coarse'])
images = {
tag: recover_image(width=width, **images[tag])
for tag in images if images[tag] is not None
}
return images
def _visualize(self, images, sample, output, state, **kwargs):
img_id, shape, view, width, name = state
if 'colors' in output and output['colors'] is not None:
            images['{}_color/{}:HWC'.format(name, img_id)] = {
'img': output['colors'][shape, view],
'min_val': float(self.args.min_color)
}
if 'depths' in output and output['depths'] is not None:
min_depth, max_depth = output['depths'].min(), output['depths'].max()
if getattr(self.args, "near", None) is not None:
min_depth = self.args.near
max_depth = self.args.far
images['{}_depth/{}:HWC'.format(name, img_id)] = {
'img': output['depths'][shape, view],
'min_val': min_depth,
'max_val': max_depth}
normals = compute_normal_map(
sample['ray_start'][shape, view].float(),
sample['ray_dir'][shape, view].float(),
output['depths'][shape, view].float(),
sample['extrinsics'][shape, view].float().inverse(), width)
images['{}_normal/{}:HWC'.format(name, img_id)] = {
'img': normals, 'min_val': -1, 'max_val': 1}
# generate point clouds from depth
# images['{}_point/{}'.format(name, img_id)] = {
# 'img': torch.cat(
# [ray(sample['ray_start'][shape, view].float(),
# sample['ray_dir'][shape, view].float(),
# output['depths'][shape, view].unsqueeze(-1).float()),
# (output['colors'][shape, view] - self.args.min_color) / (1 - self.args.min_color)], 1), # XYZRGB
# 'raw': True }
if 'z' in output and output['z'] is not None:
images['{}_z/{}:HWC'.format(name, img_id)] = {
'img': output['z'][shape, view], 'min_val': 0, 'max_val': 1}
if 'normal' in output and output['normal'] is not None:
images['{}_predn/{}:HWC'.format(name, img_id)] = {
'img': output['normal'][shape, view], 'min_val': -1, 'max_val': 1}
return images
def add_eval_scores(self, logging_output, sample, output, criterion, scores=['ssim', 'psnr', 'lpips'], outdir=None):
predicts, targets = output['colors'], sample['colors']
ssims, psnrs, lpips, rmses = [], [], [], []
for s in range(predicts.size(0)):
for v in range(predicts.size(1)):
width = int(sample['size'][s, v][1])
p = recover_image(predicts[s, v], width=width, min_val=float(self.args.min_color))
t = recover_image(targets[s, v], width=width, min_val=float(self.args.min_color))
pn, tn = p.numpy(), t.numpy()
p, t = p.to(predicts.device), t.to(targets.device)
if 'ssim' in scores:
ssims += [skimage.metrics.structural_similarity(pn, tn, multichannel=True, data_range=1)]
if 'psnr' in scores:
psnrs += [skimage.metrics.peak_signal_noise_ratio(pn, tn, data_range=1)]
if 'lpips' in scores and hasattr(criterion, 'lpips'):
with torch.no_grad():
lpips += [criterion.lpips(
2 * p.unsqueeze(-1).permute(3,2,0,1) - 1,
2 * t.unsqueeze(-1).permute(3,2,0,1) - 1).item()]
if 'depths' in sample:
td = sample['depths'][sample['depths'] > 0]
pd = output['depths'][sample['depths'] > 0]
rmses += [torch.sqrt(((td - pd) ** 2).mean()).item()]
if outdir is not None:
def imsave(filename, image):
imageio.imsave(os.path.join(outdir, filename), (image * 255).astype('uint8'))
figname = '-{:03d}_{:03d}.png'.format(sample['id'][s], sample['view'][s, v])
imsave('output' + figname, pn)
imsave('target' + figname, tn)
imsave('normal' + figname, recover_image(compute_normal_map(
sample['ray_start'][s, v].float(), sample['ray_dir'][s, v].float(),
output['depths'][s, v].float(), sample['extrinsics'][s, v].float().inverse(), width=width),
min_val=-1, max_val=1, width=width).numpy())
if 'featn2' in output:
imsave('featn2' + figname, output['featn2'][s, v].cpu().numpy())
if 'voxel' in output:
imsave('voxel' + figname, output['voxel'][s, v].cpu().numpy())
if len(ssims) > 0:
logging_output['ssim_loss'] = np.mean(ssims)
if len(psnrs) > 0:
logging_output['psnr_loss'] = np.mean(psnrs)
if len(lpips) > 0:
logging_output['lpips_loss'] = np.mean(lpips)
if len(rmses) > 0:
logging_output['rmses_loss'] = np.mean(rmses)
def adjust(self, **kwargs):
raise NotImplementedError
@property
def text(self):
return "fairnr BaseModel"
@property
def unique_seed(self):
return self._num_updates * 137 + self.args.distributed_rank
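# Hedged usage sketch (added comment, not part of the original file): subclasses such as
# NeRFModel / NSVFModel are normally driven by the fairseq training loop, but conceptually:
#
#   model = NeRFModel.build_model(args, task)     # args/task come from the fairseq setup
#   results = model(ray_split=2, **sample)        # split rays into 2 chunks to save memory
#   pred, target = results['colors'], sample['colors']
#
# 'ray_split' only trades time for memory; chunks are recombined by merge_outputs above.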
| 14,302 | 41.19174 | 121 |
py
|
NSVF
|
NSVF-main/fairnr/models/nsvf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import cv2, math, time
import numpy as np
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
register_model,
register_model_architecture
)
from fairseq.utils import item
from fairnr.data.geometry import compute_normal_map, fill_in
from fairnr.models.nerf import NeRFModel
@register_model('nsvf')
class NSVFModel(NeRFModel):
READER = 'image_reader'
ENCODER = 'sparsevoxel_encoder'
FIELD = 'radiance_field'
RAYMARCHER = 'volume_rendering'
@classmethod
def add_args(cls, parser):
super().add_args(parser)
parser.add_argument('--fine-num-sample-ratio', type=float, default=0,
                            help='ratio of samples compared to the first pass')
parser.add_argument('--inverse-distance-coarse-sampling', type=str,
choices=['none', 'camera', 'origin'], default='none',
help='if set, we do not sample points uniformly through voxels.')
def intersecting(self, ray_start, ray_dir, encoder_states, **kwargs):
S = ray_dir.size(0)
ray_start, ray_dir, intersection_outputs, hits, _ = \
super().intersecting(ray_start, ray_dir, encoder_states, **kwargs)
if self.reader.no_sampling and self.training: # sample points after ray-voxel intersection
uv, size = kwargs['uv'], kwargs['size']
mask = hits.reshape(*uv.size()[:2], uv.size(-1))
# sample rays based on voxel intersections
sampled_uv, sampled_masks = self.reader.sample_pixels(
uv, size, mask=mask, return_mask=True)
sampled_masks = sampled_masks.reshape(uv.size(0), -1).bool()
hits, sampled_masks = hits[sampled_masks].reshape(S, -1), sampled_masks.unsqueeze(-1)
intersection_outputs = {name: outs[sampled_masks.expand_as(outs)].reshape(S, -1, outs.size(-1))
for name, outs in intersection_outputs.items()}
ray_start = ray_start[sampled_masks.expand_as(ray_start)].reshape(S, -1, 3)
ray_dir = ray_dir[sampled_masks.expand_as(ray_dir)].reshape(S, -1, 3)
else:
sampled_uv = None
min_depth = intersection_outputs['min_depth']
max_depth = intersection_outputs['max_depth']
pts_idx = intersection_outputs['intersected_voxel_idx']
dists = (max_depth - min_depth).masked_fill(pts_idx.eq(-1), 0)
intersection_outputs['probs'] = dists / dists.sum(dim=-1, keepdim=True)
if getattr(self.args, "fixed_num_samples", 0) > 0:
intersection_outputs['steps'] = intersection_outputs['min_depth'].new_ones(
*intersection_outputs['min_depth'].size()[:-1], 1) * self.args.fixed_num_samples
else:
intersection_outputs['steps'] = dists.sum(-1) / self.encoder.step_size
return ray_start, ray_dir, intersection_outputs, hits, sampled_uv
def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, fine=False):
samples, all_results = super().raymarching(ray_start, ray_dir, intersection_outputs, encoder_states, fine)
all_results['voxel_edges'] = self.encoder.get_edge(ray_start, ray_dir, samples, encoder_states)
all_results['voxel_depth'] = samples['sampled_point_depth'][:, 0]
return samples, all_results
def prepare_hierarchical_sampling(self, intersection_outputs, samples, all_results):
intersection_outputs = super().prepare_hierarchical_sampling(intersection_outputs, samples, all_results)
if getattr(self.args, "fine_num_sample_ratio", 0) > 0:
intersection_outputs['steps'] = samples['sampled_point_voxel_idx'].ne(-1).sum(-1).float() * self.args.fine_num_sample_ratio
return intersection_outputs
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
        # unlike vanilla NeRF, NSVF needs fill_in to composite the background for rays that miss all voxels
S, V, P = sizes
fullsize = S * V * P
all_results['missed'] = fill_in((fullsize, ), hits, all_results['missed'], 1.0).view(S, V, P)
all_results['colors'] = fill_in((fullsize, 3), hits, all_results['colors'], 0.0).view(S, V, P, 3)
all_results['depths'] = fill_in((fullsize, ), hits, all_results['depths'], 0.0).view(S, V, P)
BG_DEPTH = self.field.bg_color.depth
bg_color = self.field.bg_color(all_results['colors'])
all_results['colors'] += all_results['missed'].unsqueeze(-1) * bg_color.reshape(fullsize, 3).view(S, V, P, 3)
all_results['depths'] += all_results['missed'] * BG_DEPTH
if 'normal' in all_results:
all_results['normal'] = fill_in((fullsize, 3), hits, all_results['normal'], 0.0).view(S, V, P, 3)
if 'voxel_depth' in all_results:
all_results['voxel_depth'] = fill_in((fullsize, ), hits, all_results['voxel_depth'], BG_DEPTH).view(S, V, P)
if 'voxel_edges' in all_results:
all_results['voxel_edges'] = fill_in((fullsize, 3), hits, all_results['voxel_edges'], 1.0).view(S, V, P, 3)
if 'feat_n2' in all_results:
all_results['feat_n2'] = fill_in((fullsize,), hits, all_results['feat_n2'], 0.0).view(S, V, P)
return all_results
def add_other_logs(self, all_results):
return {'voxs_log': item(self.encoder.voxel_size),
'stps_log': item(self.encoder.step_size),
'nvox_log': item(self.encoder.num_voxels)}
def _visualize(self, images, sample, output, state, **kwargs):
img_id, shape, view, width, name = state
images = super()._visualize(images, sample, output, state, **kwargs)
if 'voxel_edges' in output and output['voxel_edges'] is not None:
# voxel hitting visualization
images['{}_voxel/{}:HWC'.format(name, img_id)] = {
'img': output['voxel_edges'][shape, view].float(),
'min_val': 0,
'max_val': 1,
'weight':
compute_normal_map(
sample['ray_start'][shape, view].float(),
sample['ray_dir'][shape, view].float(),
output['voxel_depth'][shape, view].float(),
sample['extrinsics'][shape, view].float().inverse(),
width, proj=True)
}
if 'feat_n2' in output and output['feat_n2'] is not None:
images['{}_featn2/{}:HWC'.format(name, img_id)] = {
'img': output['feat_n2'][shape, view].float(),
'min_val': 0,
'max_val': 1
}
return images
@torch.no_grad()
def prune_voxels(self, th=0.5, train_stats=False):
self.encoder.pruning(self.field, th, train_stats=train_stats)
self.clean_caches()
@torch.no_grad()
def split_voxels(self):
logger.info("half the global voxel size {:.4f} -> {:.4f}".format(
self.encoder.voxel_size.item(), self.encoder.voxel_size.item() * .5))
self.encoder.splitting()
self.encoder.voxel_size *= .5
self.encoder.max_hits *= 1.5
self.clean_caches()
@torch.no_grad()
def reduce_stepsize(self):
logger.info("reduce the raymarching step size {:.4f} -> {:.4f}".format(
self.encoder.step_size.item(), self.encoder.step_size.item() * .5))
self.encoder.step_size *= .5
def clean_caches(self, reset=False):
self.encoder.clean_runtime_caches()
if reset:
self.encoder.reset_runtime_caches()
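    # Hedged note (added comment): prune_voxels / split_voxels / reduce_stepsize implement
    # NSVF-style progressive training.  A purely hypothetical schedule, normally driven by
    # the training scripts rather than by this class, could look like:
    #
    #   if num_updates in (5000, 25000, 75000):
    #       model.prune_voxels(th=0.5)      # drop voxels with low predicted occupancy
    #       model.split_voxels()            # halve the voxel size -> finer geometry
    #       model.reduce_stepsize()         # march with a matching smaller step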
@register_model_architecture("nsvf", "nsvf_base")
def base_architecture(args):
    # parameters that typically need to be adjusted per scene
args.voxel_size = getattr(args, "voxel_size", None)
args.max_hits = getattr(args, "max_hits", 60)
args.raymarching_stepsize = getattr(args, "raymarching_stepsize", 0.01)
args.raymarching_stepsize_ratio = getattr(args, "raymarching_stepsize_ratio", 0.0)
# encoder default parameter
args.voxel_embed_dim = getattr(args, "voxel_embed_dim", 32)
args.voxel_path = getattr(args, "voxel_path", None)
args.initial_boundingbox = getattr(args, "initial_boundingbox", None)
# field
args.inputs_to_density = getattr(args, "inputs_to_density", "emb:6:32")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
args.feature_embed_dim = getattr(args, "feature_embed_dim", 256)
args.density_embed_dim = getattr(args, "density_embed_dim", 128)
args.texture_embed_dim = getattr(args, "texture_embed_dim", 256)
# API Update: fix the number of layers
args.feature_layers = getattr(args, "feature_layers", 1)
args.texture_layers = getattr(args, "texture_layers", 3)
args.background_stop_gradient = getattr(args, "background_stop_gradient", False)
args.background_depth = getattr(args, "background_depth", 5.0)
# raymarcher
args.discrete_regularization = getattr(args, "discrete_regularization", False)
args.deterministic_step = getattr(args, "deterministic_step", False)
args.raymarching_tolerance = getattr(args, "raymarching_tolerance", 0)
args.use_octree = getattr(args, "use_octree", False)
# reader
args.pixel_per_view = getattr(args, "pixel_per_view", 2048)
args.sampling_on_mask = getattr(args, "sampling_on_mask", 0.0)
args.sampling_at_center = getattr(args, "sampling_at_center", 1.0)
args.sampling_on_bbox = getattr(args, "sampling_on_bbox", False)
args.sampling_patch_size = getattr(args, "sampling_patch_size", 1)
args.sampling_skipping_size = getattr(args, "sampling_skipping_size", 1)
# others
args.chunk_size = getattr(args, "chunk_size", 64)
args.valid_chunk_size = getattr(args, "valid_chunk_size", 64)
@register_model_architecture("nsvf", "nsvf_xyz")
def nerf2_architecture(args):
args.voxel_embed_dim = getattr(args, "voxel_embed_dim", 0)
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:10, ray:4")
base_architecture(args)
@register_model_architecture("nsvf", "nsvf_nerf")
def nerf_style_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
args.feature_layers = getattr(args, "feature_layers", 6)
args.texture_layers = getattr(args, "texture_layers", 0)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_nerf_nov")
def nerf_noview_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256")
nerf_style_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyzn")
def nerf3_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:10, normal:4, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyz_nope")
def nerf3nope_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:0:3, sigma:0:1, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyzn_old")
def nerfold_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, normal:0:3, sigma:0:1, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyzn_nope")
def nerf2nope_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:0:3, normal:0:3, sigma:0:1, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyzn_noz")
def nerf3noz_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "pos:10, normal:4, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_embn")
def nerf4_architecture(args):
args.inputs_to_density = getattr(args, "inputs_to_density", "emb:6:32")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, normal:4, ray:4")
base_architecture(args)
@register_model_architecture("nsvf", "nsvf_emb0")
def nerf5_architecture(args):
args.voxel_embed_dim = getattr(args, "voxel_embed_dim", 384)
args.inputs_to_density = getattr(args, "inputs_to_density", "emb:0:384")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
base_architecture(args)
@register_model('disco_nsvf')
class DiscoNSVFModel(NSVFModel):
FIELD = "disentangled_radiance_field"
@register_model_architecture("disco_nsvf", "disco_nsvf")
def disco_nsvf_architecture(args):
args.compressed_light_dim = getattr(args, "compressed_light_dim", 64)
nerf3_architecture(args)
@register_model('multi_disco_nsvf')
class mDiscoNSVFModel(NSVFModel):
ENCODER = "multi_sparsevoxel_encoder"
FIELD = "disentangled_radiance_field"
@register_model_architecture("multi_disco_nsvf", "multi_disco_nsvf")
def mdisco_nsvf_architecture(args):
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10, context:0:256")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:10, normal:4, ray:4, context:0:256")
disco_nsvf_architecture(args)
@register_model('sdf_nsvf')
class SDFNSVFModel(NSVFModel):
FIELD = "sdf_radiance_field"
@register_model_architecture("sdf_nsvf", "sdf_nsvf")
def sdf_nsvf_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
nerf2nope_architecture(args)
@register_model('sdf_nsvf_sfx')
class SDFSFXNSVFModel(SDFNSVFModel):
FIELD = "sdf_radiance_field"
RAYMARCHER = "surface_volume_rendering"
@register_model_architecture("sdf_nsvf_sfx", "sdf_nsvf_sfx")
def sdf_nsvfsfx_architecture(args):
sdf_nsvf_architecture(args)
| 14,499 | 43.072948 | 135 |
py
|
NSVF
|
NSVF-main/fairnr/models/nmf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import torch
from fairseq.models import (
register_model,
register_model_architecture
)
from fairnr.models.nsvf import NSVFModel
@register_model('nmf')
class NMFModel(NSVFModel):
"""
Experimental code: Neural Mesh Field
"""
ENCODER = 'triangle_mesh_encoder'
@torch.no_grad()
def prune_voxels(self, *args, **kwargs):
pass
@torch.no_grad()
def split_voxels(self):
pass
# logger.info("half the global cage size {:.4f} -> {:.4f}".format(
# self.encoder.cage_size.item(), self.encoder.cage_size.item() * .5))
# self.encoder.cage_size *= .5
@register_model_architecture("nmf", "nmf_base")
def base_architecture(args):
    # parameters that typically need to be adjusted per scene
args.max_hits = getattr(args, "max_hits", 60)
args.raymarching_stepsize = getattr(args, "raymarching_stepsize", 0.01)
# encoder default parameter
args.voxel_embed_dim = getattr(args, "voxel_embed_dim", 0)
args.voxel_path = getattr(args, "voxel_path", None)
# field
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:10, ray:4")
args.feature_embed_dim = getattr(args, "feature_embed_dim", 256)
args.density_embed_dim = getattr(args, "density_embed_dim", 128)
args.texture_embed_dim = getattr(args, "texture_embed_dim", 256)
args.feature_layers = getattr(args, "feature_layers", 1)
args.texture_layers = getattr(args, "texture_layers", 3)
args.background_stop_gradient = getattr(args, "background_stop_gradient", False)
args.background_depth = getattr(args, "background_depth", 5.0)
# raymarcher
args.discrete_regularization = getattr(args, "discrete_regularization", False)
args.deterministic_step = getattr(args, "deterministic_step", False)
args.raymarching_tolerance = getattr(args, "raymarching_tolerance", 0)
# reader
args.pixel_per_view = getattr(args, "pixel_per_view", 2048)
args.sampling_on_mask = getattr(args, "sampling_on_mask", 0.0)
args.sampling_at_center = getattr(args, "sampling_at_center", 1.0)
args.sampling_on_bbox = getattr(args, "sampling_on_bbox", False)
args.sampling_patch_size = getattr(args, "sampling_patch_size", 1)
args.sampling_skipping_size = getattr(args, "sampling_skipping_size", 1)
# others
args.chunk_size = getattr(args, "chunk_size", 64)
@register_model_architecture("nmf", "nmf_nerf")
def nerf_style_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
args.feature_layers = getattr(args, "feature_layers", 6)
args.texture_layers = getattr(args, "texture_layers", 0)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
base_architecture(args)
| 3,148 | 36.939759 | 92 |
py
|
NSVF
|
NSVF-main/fairnr/models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('fairnr.models.' + model_name)
| 651 | 39.75 | 111 |
py
|
NSVF
|
NSVF-main/fairnr/clib/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import os, sys
import torch
import torch.nn.functional as F
from torch.autograd import Function
import torch.nn as nn
import sys
import numpy as np
try:
import builtins
except:
import __builtin__ as builtins
try:
import fairnr.clib._ext as _ext
except ImportError:
pass
# raise ImportError(
# "Could not import _ext module.\n"
# "Please see the setup instructions in the README"
# )
MAX_DEPTH = 10000.0
class BallRayIntersect(Function):
@staticmethod
def forward(ctx, radius, n_max, points, ray_start, ray_dir):
inds, min_depth, max_depth = _ext.ball_intersect(
ray_start.float(), ray_dir.float(), points.float(), radius, n_max)
min_depth = min_depth.type_as(ray_start)
max_depth = max_depth.type_as(ray_start)
ctx.mark_non_differentiable(inds)
ctx.mark_non_differentiable(min_depth)
ctx.mark_non_differentiable(max_depth)
return inds, min_depth, max_depth
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None
ball_ray_intersect = BallRayIntersect.apply
class AABBRayIntersect(Function):
@staticmethod
def forward(ctx, voxelsize, n_max, points, ray_start, ray_dir):
# HACK: speed-up ray-voxel intersection by batching...
G = min(2048, int(2 * 10 ** 9 / points.numel())) # HACK: avoid out-of-memory
S, N = ray_start.shape[:2]
K = int(np.ceil(N / G))
H = K * G
if H > N:
ray_start = torch.cat([ray_start, ray_start[:, :H-N]], 1)
ray_dir = torch.cat([ray_dir, ray_dir[:, :H-N]], 1)
ray_start = ray_start.reshape(S * G, K, 3)
ray_dir = ray_dir.reshape(S * G, K, 3)
points = points.expand(S * G, *points.size()[1:]).contiguous()
inds, min_depth, max_depth = _ext.aabb_intersect(
ray_start.float(), ray_dir.float(), points.float(), voxelsize, n_max)
min_depth = min_depth.type_as(ray_start)
max_depth = max_depth.type_as(ray_start)
inds = inds.reshape(S, H, -1)
min_depth = min_depth.reshape(S, H, -1)
max_depth = max_depth.reshape(S, H, -1)
if H > N:
inds = inds[:, :N]
min_depth = min_depth[:, :N]
max_depth = max_depth[:, :N]
ctx.mark_non_differentiable(inds)
ctx.mark_non_differentiable(min_depth)
ctx.mark_non_differentiable(max_depth)
return inds, min_depth, max_depth
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None
aabb_ray_intersect = AABBRayIntersect.apply
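# Hedged illustration of the batching trick used by the intersection Functions above
# (added comment): rays are padded so their count divides evenly into G groups, reshaped
# for the CUDA kernel, and the padding is sliced off again afterwards.  With hypothetical
# numbers:
#
#   N, G = 1000, 256                      # rays per scene, number of groups
#   K = int(np.ceil(N / G))               # rays per group             -> 4
#   H = K * G                             # padded ray count           -> 1024
#   H - N                                 # rays duplicated as padding -> 24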
class SparseVoxelOctreeRayIntersect(Function):
@staticmethod
def forward(ctx, voxelsize, n_max, points, children, ray_start, ray_dir):
G = min(2048, int(2 * 10 ** 9 / (points.numel() + children.numel()))) # HACK: avoid out-of-memory
S, N = ray_start.shape[:2]
K = int(np.ceil(N / G))
H = K * G
if H > N:
ray_start = torch.cat([ray_start, ray_start[:, :H-N]], 1)
ray_dir = torch.cat([ray_dir, ray_dir[:, :H-N]], 1)
ray_start = ray_start.reshape(S * G, K, 3)
ray_dir = ray_dir.reshape(S * G, K, 3)
points = points.expand(S * G, *points.size()[1:]).contiguous()
children = children.expand(S * G, *children.size()[1:]).contiguous()
inds, min_depth, max_depth = _ext.svo_intersect(
ray_start.float(), ray_dir.float(), points.float(), children.int(), voxelsize, n_max)
min_depth = min_depth.type_as(ray_start)
max_depth = max_depth.type_as(ray_start)
inds = inds.reshape(S, H, -1)
min_depth = min_depth.reshape(S, H, -1)
max_depth = max_depth.reshape(S, H, -1)
if H > N:
inds = inds[:, :N]
min_depth = min_depth[:, :N]
max_depth = max_depth[:, :N]
ctx.mark_non_differentiable(inds)
ctx.mark_non_differentiable(min_depth)
ctx.mark_non_differentiable(max_depth)
return inds, min_depth, max_depth
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None
svo_ray_intersect = SparseVoxelOctreeRayIntersect.apply
class TriangleRayIntersect(Function):
@staticmethod
def forward(ctx, cagesize, blur_ratio, n_max, points, faces, ray_start, ray_dir):
# HACK: speed-up ray-voxel intersection by batching...
G = min(2048, int(2 * 10 ** 9 / (3 * faces.numel()))) # HACK: avoid out-of-memory
S, N = ray_start.shape[:2]
K = int(np.ceil(N / G))
H = K * G
if H > N:
ray_start = torch.cat([ray_start, ray_start[:, :H-N]], 1)
ray_dir = torch.cat([ray_dir, ray_dir[:, :H-N]], 1)
ray_start = ray_start.reshape(S * G, K, 3)
ray_dir = ray_dir.reshape(S * G, K, 3)
face_points = F.embedding(faces.reshape(-1, 3), points.reshape(-1, 3))
face_points = face_points.unsqueeze(0).expand(S * G, *face_points.size()).contiguous()
inds, depth, uv = _ext.triangle_intersect(
ray_start.float(), ray_dir.float(), face_points.float(), cagesize, blur_ratio, n_max)
depth = depth.type_as(ray_start)
uv = uv.type_as(ray_start)
inds = inds.reshape(S, H, -1)
depth = depth.reshape(S, H, -1, 3)
uv = uv.reshape(S, H, -1)
if H > N:
inds = inds[:, :N]
depth = depth[:, :N]
uv = uv[:, :N]
ctx.mark_non_differentiable(inds)
ctx.mark_non_differentiable(depth)
ctx.mark_non_differentiable(uv)
return inds, depth, uv
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None, None
triangle_ray_intersect = TriangleRayIntersect.apply
class UniformRaySampling(Function):
@staticmethod
def forward(ctx, pts_idx, min_depth, max_depth, step_size, max_ray_length, deterministic=False):
G, N, P = 256, pts_idx.size(0), pts_idx.size(1)
H = int(np.ceil(N / G)) * G
if H > N:
pts_idx = torch.cat([pts_idx, pts_idx[:H-N]], 0)
min_depth = torch.cat([min_depth, min_depth[:H-N]], 0)
max_depth = torch.cat([max_depth, max_depth[:H-N]], 0)
pts_idx = pts_idx.reshape(G, -1, P)
min_depth = min_depth.reshape(G, -1, P)
max_depth = max_depth.reshape(G, -1, P)
# pre-generate noise
max_steps = int(max_ray_length / step_size)
max_steps = max_steps + min_depth.size(-1) * 2
noise = min_depth.new_zeros(*min_depth.size()[:-1], max_steps)
if deterministic:
noise += 0.5
else:
noise = noise.uniform_()
# call cuda function
sampled_idx, sampled_depth, sampled_dists = _ext.uniform_ray_sampling(
pts_idx, min_depth.float(), max_depth.float(), noise.float(), step_size, max_steps)
sampled_depth = sampled_depth.type_as(min_depth)
sampled_dists = sampled_dists.type_as(min_depth)
sampled_idx = sampled_idx.reshape(H, -1)
sampled_depth = sampled_depth.reshape(H, -1)
sampled_dists = sampled_dists.reshape(H, -1)
if H > N:
sampled_idx = sampled_idx[: N]
sampled_depth = sampled_depth[: N]
sampled_dists = sampled_dists[: N]
max_len = sampled_idx.ne(-1).sum(-1).max()
sampled_idx = sampled_idx[:, :max_len]
sampled_depth = sampled_depth[:, :max_len]
sampled_dists = sampled_dists[:, :max_len]
ctx.mark_non_differentiable(sampled_idx)
ctx.mark_non_differentiable(sampled_depth)
ctx.mark_non_differentiable(sampled_dists)
return sampled_idx, sampled_depth, sampled_dists
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None, None
uniform_ray_sampling = UniformRaySampling.apply
class InverseCDFRaySampling(Function):
@staticmethod
def forward(ctx, pts_idx, min_depth, max_depth, probs, steps, fixed_step_size=-1, deterministic=False):
G, N, P = 200, pts_idx.size(0), pts_idx.size(1)
H = int(np.ceil(N / G)) * G
if H > N:
pts_idx = torch.cat([pts_idx, pts_idx[:1].expand(H-N, P)], 0)
min_depth = torch.cat([min_depth, min_depth[:1].expand(H-N, P)], 0)
max_depth = torch.cat([max_depth, max_depth[:1].expand(H-N, P)], 0)
probs = torch.cat([probs, probs[:1].expand(H-N, P)], 0)
steps = torch.cat([steps, steps[:1].expand(H-N)], 0)
# print(G, P, np.ceil(N / G), N, H, pts_idx.shape, min_depth.device)
pts_idx = pts_idx.reshape(G, -1, P)
min_depth = min_depth.reshape(G, -1, P)
max_depth = max_depth.reshape(G, -1, P)
probs = probs.reshape(G, -1, P)
steps = steps.reshape(G, -1)
# pre-generate noise
max_steps = steps.ceil().long().max() + P
noise = min_depth.new_zeros(*min_depth.size()[:-1], max_steps)
if deterministic:
noise += 0.5
else:
noise = noise.uniform_().clamp(min=0.001, max=0.999) # in case
# call cuda function
        chunk_size = 4 * G   # chunk the kernel calls to avoid running out of memory
results = [
_ext.inverse_cdf_sampling(
pts_idx[:, i:i+chunk_size].contiguous(),
min_depth.float()[:, i:i+chunk_size].contiguous(),
max_depth.float()[:, i:i+chunk_size].contiguous(),
noise.float()[:, i:i+chunk_size].contiguous(),
probs.float()[:, i:i+chunk_size].contiguous(),
steps.float()[:, i:i+chunk_size].contiguous(),
fixed_step_size)
for i in range(0, min_depth.size(1), chunk_size)
]
sampled_idx, sampled_depth, sampled_dists = [
torch.cat([r[i] for r in results], 1)
for i in range(3)
]
sampled_depth = sampled_depth.type_as(min_depth)
sampled_dists = sampled_dists.type_as(min_depth)
sampled_idx = sampled_idx.reshape(H, -1)
sampled_depth = sampled_depth.reshape(H, -1)
sampled_dists = sampled_dists.reshape(H, -1)
if H > N:
sampled_idx = sampled_idx[: N]
sampled_depth = sampled_depth[: N]
sampled_dists = sampled_dists[: N]
max_len = sampled_idx.ne(-1).sum(-1).max()
sampled_idx = sampled_idx[:, :max_len]
sampled_depth = sampled_depth[:, :max_len]
sampled_dists = sampled_dists[:, :max_len]
ctx.mark_non_differentiable(sampled_idx)
ctx.mark_non_differentiable(sampled_depth)
ctx.mark_non_differentiable(sampled_dists)
return sampled_idx, sampled_depth, sampled_dists
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None, None, None
inverse_cdf_sampling = InverseCDFRaySampling.apply
# back-up for ray point sampling
@torch.no_grad()
def _parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, deterministic=False):
# uniform sampling
_min_depth = min_depth.min(1)[0]
_max_depth = max_depth.masked_fill(max_depth.eq(MAX_DEPTH), 0).max(1)[0]
max_ray_length = (_max_depth - _min_depth).max()
delta = torch.arange(int(max_ray_length / MARCH_SIZE), device=min_depth.device, dtype=min_depth.dtype)
delta = delta[None, :].expand(min_depth.size(0), delta.size(-1))
if deterministic:
delta = delta + 0.5
else:
delta = delta + delta.clone().uniform_().clamp(min=0.01, max=0.99)
delta = delta * MARCH_SIZE
sampled_depth = min_depth[:, :1] + delta
sampled_idx = (sampled_depth[:, :, None] >= min_depth[:, None, :]).sum(-1) - 1
sampled_idx = pts_idx.gather(1, sampled_idx)
# include all boundary points
sampled_depth = torch.cat([min_depth, max_depth, sampled_depth], -1)
sampled_idx = torch.cat([pts_idx, pts_idx, sampled_idx], -1)
# reorder
sampled_depth, ordered_index = sampled_depth.sort(-1)
sampled_idx = sampled_idx.gather(1, ordered_index)
sampled_dists = sampled_depth[:, 1:] - sampled_depth[:, :-1] # distances
sampled_depth = .5 * (sampled_depth[:, 1:] + sampled_depth[:, :-1]) # mid-points
# remove all invalid depths
min_ids = (sampled_depth[:, :, None] >= min_depth[:, None, :]).sum(-1) - 1
max_ids = (sampled_depth[:, :, None] >= max_depth[:, None, :]).sum(-1)
sampled_depth.masked_fill_(
(max_ids.ne(min_ids)) |
(sampled_depth > _max_depth[:, None]) |
(sampled_dists == 0.0)
, MAX_DEPTH)
sampled_depth, ordered_index = sampled_depth.sort(-1) # sort again
sampled_masks = sampled_depth.eq(MAX_DEPTH)
num_max_steps = (~sampled_masks).sum(-1).max()
sampled_depth = sampled_depth[:, :num_max_steps]
sampled_dists = sampled_dists.gather(1, ordered_index).masked_fill_(sampled_masks, 0.0)[:, :num_max_steps]
sampled_idx = sampled_idx.gather(1, ordered_index).masked_fill_(sampled_masks, -1)[:, :num_max_steps]
return sampled_idx, sampled_depth, sampled_dists
@torch.no_grad()
def parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, deterministic=False):
chunk_size=4096
full_size = min_depth.shape[0]
if full_size <= chunk_size:
return _parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, deterministic=deterministic)
outputs = zip(*[
_parallel_ray_sampling(
MARCH_SIZE,
pts_idx[i:i+chunk_size], min_depth[i:i+chunk_size], max_depth[i:i+chunk_size],
deterministic=deterministic)
for i in range(0, full_size, chunk_size)])
sampled_idx, sampled_depth, sampled_dists = outputs
def padding_points(xs, pad):
if len(xs) == 1:
return xs[0]
maxlen = max([x.size(1) for x in xs])
full_size = sum([x.size(0) for x in xs])
xt = xs[0].new_ones(full_size, maxlen).fill_(pad)
st = 0
for i in range(len(xs)):
xt[st: st + xs[i].size(0), :xs[i].size(1)] = xs[i]
st += xs[i].size(0)
return xt
sampled_idx = padding_points(sampled_idx, -1)
sampled_depth = padding_points(sampled_depth, MAX_DEPTH)
sampled_dists = padding_points(sampled_dists, 0.0)
return sampled_idx, sampled_depth, sampled_dists
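# Hedged example for the nested padding_points helper above (added comment, not executed):
# chunks with different sample counts per ray are merged by padding every row to the
# longest length, e.g.
#
#   a = torch.tensor([[1., 2., 3.]])            # chunk 1: 1 ray, 3 samples
#   b = torch.tensor([[4., 5.], [6., 7.]])      # chunk 2: 2 rays, 2 samples
#   padding_points([a, b], pad=0.)              # -> [[1., 2., 3.],
#                                               #     [4., 5., 0.],
#                                               #     [6., 7., 0.]]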
| 14,842 | 37.553247 | 110 |
py
|
NSVF
|
NSVF-main/fairnr/data/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import functools
import cv2
import math
import numpy as np
import imageio
from glob import glob
import os
import copy
import shutil
import skimage.metrics
import pandas as pd
import pylab as plt
import fairseq.distributed_utils as du
from plyfile import PlyData, PlyElement
from fairseq.meters import StopwatchMeter
def get_rank():
try:
return du.get_rank()
except AssertionError:
return 0
def get_world_size():
try:
return du.get_world_size()
except AssertionError:
return 1
def parse_views(view_args):
output = []
try:
xx = view_args.split(':')
ids = xx[0].split(',')
for id in ids:
if '..' in id:
a, b = id.split('..')
output += list(range(int(a), int(b)))
else:
output += [int(id)]
if len(xx) > 1:
output = output[::int(xx[-1])]
except Exception as e:
raise Exception("parse view args error: {}".format(e))
return output
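# Hedged examples of the view-argument format parsed above (added comment):
#
#   parse_views("0..4,7")     # -> [0, 1, 2, 3, 7]   ('a..b' ranges are end-exclusive)
#   parse_views("0..8:2")     # -> [0, 2, 4, 6]      (':k' keeps every k-th view)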
def get_uv(H, W, h, w):
"""
H, W: real image (intrinsics)
h, w: resized image
"""
uv = np.flip(np.mgrid[0: h, 0: w], axis=0).astype(np.float32)
uv[0] = uv[0] * float(W / w)
uv[1] = uv[1] * float(H / h)
return uv, [float(H / h), float(W / w)]
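# Hedged example for get_uv (added comment): with a real 800x800 image rendered at 400x400,
# the returned grid stores pixel coordinates in the original resolution so the unchanged
# camera intrinsics still apply:
#
#   uv, ratio = get_uv(800, 800, 400, 400)
#   uv.shape, ratio                # -> (2, 400, 400), [2.0, 2.0]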
def load_rgb(
path,
resolution=None,
with_alpha=True,
bg_color=[1.0, 1.0, 1.0],
min_rgb=-1,
interpolation='AREA'):
if with_alpha:
img = imageio.imread(path) # RGB-ALPHA
else:
img = imageio.imread(path)[:, :, :3]
img = skimage.img_as_float32(img).astype('float32')
H, W, D = img.shape
h, w = resolution
if D == 3:
img = np.concatenate([img, np.ones((img.shape[0], img.shape[1], 1))], -1).astype('float32')
uv, ratio = get_uv(H, W, h, w)
if (h < H) or (w < W):
# img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA).astype('float32')
if min_rgb == -1: # 0, 1 --> -1, 1
img[:, :, :3] -= 0.5
img[:, :, :3] *= 2.
img[:, :, :3] = img[:, :, :3] * img[:, :, 3:] + np.asarray(bg_color)[None, None, :] * (1 - img[:, :, 3:])
img[:, :, 3] = img[:, :, 3] * (img[:, :, :3] != np.asarray(bg_color)[None, None, :]).any(-1)
img = img.transpose(2, 0, 1)
return img, uv, ratio
def load_depth(path, resolution=None, depth_plane=5):
if path is None:
return None
img = cv2.imread(path, cv2.IMREAD_UNCHANGED).astype(np.float32)
# ret, img = cv2.threshold(img, depth_plane, depth_plane, cv2.THRESH_TRUNC)
H, W = img.shape[:2]
h, w = resolution
if (h < H) or (w < W):
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
#img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
    if len(img.shape) == 3:
        img = img[:, :, :1]
        img = img.transpose(2, 0, 1)
    else:
        img = img[None, :, :]
return img
def load_mask(path, resolution=None):
if path is None:
return None
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
h, w = resolution
H, W = img.shape[:2]
if (h < H) or (w < W):
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
img = img / (img.max() + 1e-7)
return img
def load_matrix(path):
lines = [[float(w) for w in line.strip().split()] for line in open(path)]
if len(lines[0]) == 2:
lines = lines[1:]
if len(lines[-1]) == 2:
lines = lines[:-1]
return np.array(lines).astype(np.float32)
def load_intrinsics(filepath, resized_width=None, invert_y=False):
try:
intrinsics = load_matrix(filepath)
if intrinsics.shape[0] == 3 and intrinsics.shape[1] == 3:
_intrinsics = np.zeros((4, 4), np.float32)
_intrinsics[:3, :3] = intrinsics
_intrinsics[3, 3] = 1
intrinsics = _intrinsics
if intrinsics.shape[0] == 1 and intrinsics.shape[1] == 16:
intrinsics = intrinsics.reshape(4, 4)
return intrinsics
except ValueError:
pass
# Get camera intrinsics
with open(filepath, 'r') as file:
f, cx, cy, _ = map(float, file.readline().split())
fx = f
if invert_y:
fy = -f
else:
fy = f
# Build the intrinsic matrices
full_intrinsic = np.array([[fx, 0., cx, 0.],
[0., fy, cy, 0],
[0., 0, 1, 0],
[0, 0, 0, 1]])
return full_intrinsic
def unflatten_img(img, width=512):
sizes = img.size()
height = sizes[-1] // width
return img.reshape(*sizes[:-1], height, width)
def square_crop_img(img):
if img.shape[0] == img.shape[1]:
return img # already square
min_dim = np.amin(img.shape[:2])
center_coord = np.array(img.shape[:2]) // 2
img = img[center_coord[0] - min_dim // 2:center_coord[0] + min_dim // 2,
center_coord[1] - min_dim // 2:center_coord[1] + min_dim // 2]
return img
def sample_pixel_from_image(
num_pixel, num_sample,
mask=None, ratio=1.0,
use_bbox=False,
center_ratio=1.0,
width=512,
patch_size=1):
if patch_size > 1:
assert (num_pixel % (patch_size * patch_size) == 0) \
and (num_sample % (patch_size * patch_size) == 0), "size must match"
_num_pixel = num_pixel // (patch_size * patch_size)
_num_sample = num_sample // (patch_size * patch_size)
height = num_pixel // width
_mask = None if mask is None else \
mask.reshape(height, width).reshape(
height//patch_size, patch_size, width//patch_size, patch_size
).any(1).any(-1).reshape(-1)
_width = width // patch_size
        _out = sample_pixel_from_image(_num_pixel, _num_sample, mask=_mask, ratio=ratio, use_bbox=use_bbox, width=_width)
_x, _y = _out % _width, _out // _width
x, y = _x * patch_size, _y * patch_size
x = x[:, None, None] + np.arange(patch_size)[None, :, None]
y = y[:, None, None] + np.arange(patch_size)[None, None, :]
out = x + y * width
return out.reshape(-1)
if center_ratio < 1.0:
r = (1 - center_ratio) / 2.0
H, W = num_pixel // width, width
mask0 = np.zeros((H, W))
mask0[int(H * r): H - int(H * r), int(W * r): W - int(W * r)] = 1
mask0 = mask0.reshape(-1)
if mask is None:
mask = mask0
else:
mask = mask * mask0
if mask is not None:
mask = (mask > 0.0).astype('float32')
if (mask is None) or \
(ratio <= 0.0) or \
(mask.sum() == 0) or \
((1 - mask).sum() == 0):
return np.random.choice(num_pixel, num_sample)
if use_bbox:
mask = mask.reshape(-1, width)
x, y = np.where(mask == 1)
mask = np.zeros_like(mask)
mask[x.min(): x.max()+1, y.min(): y.max()+1] = 1.0
mask = mask.reshape(-1)
try:
probs = mask * ratio / (mask.sum()) + (1 - mask) / (num_pixel - mask.sum()) * (1 - ratio)
# x = np.random.choice(num_pixel, num_sample, True, p=probs)
return np.random.choice(num_pixel, num_sample, True, p=probs)
except Exception:
return np.random.choice(num_pixel, num_sample)
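# Hedged numeric example for the masked sampling above (added comment): with ratio=0.9,
# about 90% of the sampled pixels are expected to land inside the mask.  For a 100-pixel
# image with a 20-pixel mask, each masked pixel gets probability 0.9 / 20 = 0.045 and each
# unmasked pixel (1 - 0.9) / 80 = 0.00125, which sums to 1 over the image.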
def colormap(dz):
return plt.cm.jet(dz)
# return plt.cm.viridis(dz)
# return plt.cm.gray(dz)
def recover_image(img, min_val=-1, max_val=1, width=512, bg=None, weight=None, raw=False):
if raw: return img
sizes = img.size()
height = sizes[0] // width
img = img.float().to('cpu')
if len(sizes) == 1 and (bg is not None):
bg_mask = img.eq(bg)[:, None].type_as(img)
img = ((img - min_val) / (max_val - min_val)).clamp(min=0, max=1)
if len(sizes) == 1:
img = torch.from_numpy(colormap(img.numpy())[:, :3])
if weight is not None:
weight = weight.float().to('cpu')
img = img * weight[:, None]
if bg is not None:
img = img * (1 - bg_mask) + bg_mask
img = img.reshape(height, width, -1)
return img
def write_images(writer, images, updates):
for tag in images:
img = images[tag]
tag, dataform = tag.split(':')
writer.add_image(tag, img, updates, dataformats=dataform)
def compute_psnr(p, t):
"""Compute PSNR of model image predictions.
:param prediction: Return value of forward pass.
:param ground_truth: Ground truth.
:return: (psnr, ssim): tuple of floats
"""
ssim = skimage.metrics.structural_similarity(p, t, multichannel=True, data_range=1)
psnr = skimage.metrics.peak_signal_noise_ratio(p, t, data_range=1)
return ssim, psnr
def save_point_cloud(filename, xyz, rgb=None):
if rgb is None:
vertex = np.array([(xyz[k, 0], xyz[k, 1], xyz[k, 2]) for k in range(xyz.shape[0])],
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
else:
vertex = np.array([(xyz[k, 0], xyz[k, 1], xyz[k, 2], rgb[k, 0], rgb[k, 1], rgb[k, 2]) for k in range(xyz.shape[0])],
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
# PlyData([PlyElement.describe(vertex, 'vertex')], text=True).write(filename)
# from fairseq import pdb; pdb.set_trace()
PlyData([PlyElement.describe(vertex, 'vertex')]).write(open(filename, 'wb'))
class InfIndex(object):
def __init__(self, index_list, shuffle=False):
self.index_list = index_list
self.size = len(index_list)
self.shuffle = shuffle
self.reset_permutation()
def reset_permutation(self):
if self.shuffle:
self._perm = np.random.permutation(self.index_list).tolist()
else:
self._perm = copy.deepcopy(self.index_list)
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return self.size
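# Hedged usage sketch for InfIndex (added comment): it cycles forever over an index list,
# re-permuting after each full pass when shuffle=True; shape_dataset.py uses it to pick the
# next view per shape.  With shuffle=False the order is simply the reversed list, e.g.
#
#   it = InfIndex([0, 1, 2], shuffle=False)
#   [next(it) for _ in range(5)]    # -> [2, 1, 0, 2, 1]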
class Timer(StopwatchMeter):
def __enter__(self):
"""Start a new timer as a context manager"""
self.start()
return self
def __exit__(self, *exc_info):
"""Stop the context manager timer"""
self.stop()
class GPUTimer(object):
def __enter__(self):
"""Start a new timer as a context manager"""
self.start = torch.cuda.Event(enable_timing=True)
self.end = torch.cuda.Event(enable_timing=True)
self.start.record()
self.sum = 0
return self
def __exit__(self, *exc_info):
"""Stop the context manager timer"""
self.end.record()
torch.cuda.synchronize()
self.sum = self.start.elapsed_time(self.end) / 1000.
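# Hedged usage sketch for the timer helpers above (added comment):
#
#   with GPUTimer() as timer:
#       y = torch.zeros(1024, 1024, device='cuda') @ torch.ones(1024, 1024, device='cuda')
#   print(timer.sum)                # elapsed GPU time in seconds
#
# Timer works the same way but accumulates wall-clock time via fairseq's StopwatchMeter.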
| 11,063 | 28.902703 | 125 |
py
|
NSVF
|
NSVF-main/fairnr/data/shape_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, glob
import copy
import numpy as np
import torch
import logging
from collections import defaultdict
from fairseq.data import FairseqDataset, BaseWrapperDataset
from . import data_utils, geometry, trajectory
logger = logging.getLogger(__name__)
class ShapeDataset(FairseqDataset):
"""
A dataset that only returns data per shape
"""
def __init__(self,
paths,
preload=True,
repeat=1,
subsample_valid=-1,
ids=None):
if os.path.isdir(paths):
self.paths = [paths]
else:
self.paths = [line.strip() for line in open(paths)]
self.subsample_valid = subsample_valid
self.total_num_shape = len(self.paths)
self.cache = None
self.repeat = repeat
# -- load per-shape data
_data_per_shape = {}
_data_per_shape['shape'] = list(range(len(self.paths)))
_ixts = self.find_intrinsics()
_glbs = self.find_global()
if len(_ixts) > 0:
_data_per_shape['ixt'] = _ixts
if len(_glbs) > 0:
_data_per_shape['glb'] = _glbs
if self.subsample_valid > -1:
for key in _data_per_shape:
_data_per_shape[key] = _data_per_shape[key][::self.subsample_valid]
self.paths = self.paths[::self.subsample_valid]
self.total_num_shape = len(self.paths)
# group the data..
data_list = []
for r in range(repeat):
# HACK: making several copies to enable multi-GPU usage.
if r == 0 and preload:
self.cache = []
logger.info('pre-load the dataset into memory.')
for id in range(self.total_num_shape):
element = {}
for key in _data_per_shape:
element[key] = _data_per_shape[key][id]
data_list.append(element)
if r == 0 and preload:
self.cache += [self._load_batch(data_list, id)]
# group the data together
self.data = data_list
def find_intrinsics(self):
ixt_list = []
for path in self.paths:
if os.path.exists(path + '/intrinsic.txt'):
ixt_list.append(path + '/intrinsic.txt')
elif os.path.exists(path + '/intrinsics.txt'):
ixt_list.append(path + '/intrinsics.txt')
return ixt_list
def find_global(self):
glb_list = []
for path in self.paths:
if os.path.exists(path + '/global.txt'):
glb_list.append(path + '/global.txt')
return glb_list
def _load_shape(self, packed_data):
intrinsics = data_utils.load_intrinsics(packed_data['ixt']).astype('float32') \
if packed_data.get('ixt', None) is not None else None
shape_id = packed_data['shape']
shape_data = {'intrinsics': intrinsics, 'id': shape_id}
if packed_data.get('glb', None) is not None: # additional global feature (if any)
shape_data['global_index'] = np.loadtxt(packed_data['glb']).astype('int64')
return shape_data
def _load_batch(self, data, index):
return index, self._load_shape(data[index])
def __getitem__(self, index):
if self.cache is not None:
return self.cache[index % self.total_num_shape][0], \
self.cache[index % self.total_num_shape][1]
return self._load_batch(self.data, index)
def __len__(self):
return len(self.data)
def num_tokens(self, index):
return 1
def _collater(self, samples):
results = {}
results['shape'] = torch.from_numpy(np.array([s[0] for s in samples]))
for key in samples[0][1]:
if samples[0][1][key] is not None:
results[key] = torch.from_numpy(
np.array([s[1][key] for s in samples]))
else:
results[key] = None
return results
def collater(self, samples):
try:
results = self._collater(samples)
except IndexError:
results = None
return results
class ShapeViewDataset(ShapeDataset):
"""
    A dataset that contains a series of images rendered offline for an object.
"""
def __init__(self,
paths,
views,
num_view,
subsample_valid=-1,
resolution=None,
load_depth=False,
load_mask=False,
train=True,
preload=True,
repeat=1,
binarize=True,
bg_color="1,1,1",
min_color=-1,
ids=None):
super().__init__(paths, False, repeat, subsample_valid, ids)
self.train = train
self.load_depth = load_depth
self.load_mask = load_mask
self.views = views
self.num_view = num_view
if isinstance(resolution, str):
self.resolution = [int(r) for r in resolution.split('x')]
else:
self.resolution = [resolution, resolution]
self.world2camera = True
self.cache_view = None
bg_color = [float(b) for b in bg_color.split(',')] \
if isinstance(bg_color, str) else [bg_color]
if min_color == -1:
bg_color = [b * 2 - 1 for b in bg_color]
if len(bg_color) == 1:
bg_color = bg_color + bg_color + bg_color
self.bg_color = bg_color
self.min_color = min_color
        self.apply_mask_color = (self.bg_color[0] >= -1) & (self.bg_color[0] <= 1)  # whether to composite the background color using the mask
# -- load per-view data
_data_per_view = {}
_data_per_view['rgb'] = self.find_rgb()
_data_per_view['ext'] = self.find_extrinsics()
if self.find_intrinsics_per_view() is not None:
_data_per_view['ixt_v'] = self.find_intrinsics_per_view()
if self.load_depth:
_data_per_view['dep'] = self.find_depth()
if self.load_mask:
_data_per_view['mask'] = self.find_mask()
_data_per_view['view'] = self.summary_view_data(_data_per_view)
# group the data.
_index = 0
for r in range(repeat):
# HACK: making several copies to enable multi-GPU usage.
if r == 0 and preload:
self.cache = []
logger.info('pre-load the dataset into memory.')
for id in range(self.total_num_shape):
element = {}
total_num_view = len(_data_per_view['rgb'][id])
perm_ids = np.random.permutation(total_num_view) if train else np.arange(total_num_view)
for key in _data_per_view:
element[key] = [_data_per_view[key][id][i] for i in perm_ids]
self.data[_index].update(element)
if r == 0 and preload:
phase_name = f"{'train' if self.train else 'valid'}" + \
f".{self.resolution[0]}x{self.resolution[1]}" + \
f"{'.d' if load_depth else ''}" + \
f"{'.m' if load_mask else ''}" + \
f"{'b' if not self.apply_mask_color else ''}" + \
"_full"
logger.info("preload {}-{}".format(id, phase_name))
if binarize:
cache = self._load_binary(id, np.arange(total_num_view), phase_name)
else:
cache = self._load_batch(self.data, id, np.arange(total_num_view))
self.cache += [cache]
_index += 1
# group the data together
self.data_index = []
for i, d in enumerate(self.data):
if self.train:
index_list = list(range(len(d['rgb'])))
self.data_index.append(
data_utils.InfIndex(index_list, shuffle=True)
)
else:
copy_id = i // self.total_num_shape
index_list = []
for j in range(copy_id * num_view, copy_id * num_view + num_view):
index_list.append(j % len(d['rgb']))
self.data_index.append(
data_utils.InfIndex(index_list, shuffle=False)
)
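    # _load_binary caches the fully pre-processed views of a shape in an .npz
    # file next to the data so later runs can skip image loading; only rank 0
    # writes the cache to avoid concurrent writes in distributed training.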
def _load_binary(self, id, views, phase='train'):
root = os.path.dirname(self.data[id]['shape'])
npzfile = os.path.join(root, '{}.npz'.format(phase))
try:
with np.load(npzfile, allow_pickle=True) as f:
return f['cache']
except Exception:
cache = self._load_batch(self.data, id, views)
if data_utils.get_rank() == 0:
np.savez(npzfile, cache=cache)
return cache
def select(self, file_list):
if len(file_list[0]) == 0:
raise FileNotFoundError
return [[files[i] for i in self.views] for files in file_list]
def find_rgb(self):
try:
return self.select([sorted(glob.glob(path + '/rgb/*.*g')) for path in self.paths])
except FileNotFoundError:
try:
return self.select([sorted(glob.glob(path + '/color/*.*g')) for path in self.paths])
except FileNotFoundError:
raise FileNotFoundError("CANNOT find rendered images.")
def find_depth(self):
try:
return self.select([sorted(glob.glob(path + '/depth/*.exr')) for path in self.paths])
except FileNotFoundError:
raise FileNotFoundError("CANNOT find estimated depths images")
def find_mask(self):
try:
return self.select([sorted(glob.glob(path + '/mask/*')) for path in self.paths])
except FileNotFoundError:
raise FileNotFoundError("CANNOT find precomputed mask images")
def find_extrinsics(self):
try:
return self.select([sorted(glob.glob(path + '/extrinsic/*.txt')) for path in self.paths])
except FileNotFoundError:
try:
self.world2camera = False
return self.select([sorted(glob.glob(path + '/pose/*.txt')) for path in self.paths])
except FileNotFoundError:
raise FileNotFoundError('world2camera or camera2world matrices not found.')
def find_intrinsics_per_view(self):
try:
return self.select([sorted(glob.glob(path + '/intrinsic/*.txt')) for path in self.paths])
except FileNotFoundError:
return None
def summary_view_data(self, _data_per_view):
keys = [k for k in _data_per_view if _data_per_view[k] is not None]
num_of_objects = len(_data_per_view[keys[0]])
for k in range(num_of_objects):
            assert len(set([len(_data_per_view[key][k]) for key in keys])) == 1, "number of views must be consistent."
return [list(range(len(_data_per_view[keys[0]][k]))) for k in range(num_of_objects)]
def num_tokens(self, index):
return self.num_view
def _load_view(self, packed_data, view_idx):
image, uv, ratio = data_utils.load_rgb(
packed_data['rgb'][view_idx],
resolution=self.resolution,
bg_color=self.bg_color,
min_rgb=self.min_color)
rgb, alpha = image[:3], image[3] # C x H x W for RGB
extrinsics = data_utils.load_matrix(packed_data['ext'][view_idx])
extrinsics = geometry.parse_extrinsics(extrinsics, self.world2camera).astype('float32') # this is C2W
intrinsics = data_utils.load_intrinsics(packed_data['ixt_v'][view_idx]).astype('float32') \
if packed_data.get('ixt_v', None) is not None else None
z, mask = None, None
if packed_data.get('dep', None) is not None:
z = data_utils.load_depth(packed_data['dep'][view_idx], resolution=self.resolution)
if packed_data.get('mask', None) is not None:
mask = data_utils.load_mask(packed_data['mask'][view_idx], resolution=self.resolution)
if self.apply_mask_color: # we can also not apply mask
rgb = rgb * mask[None, :, :] + (1 - mask[None, :, :]) * np.asarray(self.bg_color)[:, None, None]
return {
'path': packed_data['rgb'][view_idx],
'view': view_idx,
'uv': uv.reshape(2, -1),
'colors': rgb.reshape(3, -1),
'alpha': alpha.reshape(-1),
'extrinsics': extrinsics,
'intrinsics': intrinsics,
'depths': z.reshape(-1) if z is not None else None,
'mask': mask.reshape(-1) if mask is not None else None,
'size': np.array([rgb.shape[1], rgb.shape[2]] + ratio, dtype=np.float32)
}
def _load_batch(self, data, index, view_ids=None):
if view_ids is None:
view_ids = [next(self.data_index[index]) for _ in range(self.num_view)]
return index, self._load_shape(data[index]), [self._load_view(data[index], view_id) for view_id in view_ids]
def __getitem__(self, index):
if self.cache is not None:
view_ids = [next(self.data_index[index]) for _ in range(self.num_view)]
return copy.deepcopy(self.cache[index % self.total_num_shape][0]), \
copy.deepcopy(self.cache[index % self.total_num_shape][1]), \
[copy.deepcopy(self.cache[index % self.total_num_shape][2][i]) for i in view_ids]
return self._load_batch(self.data, index)
def collater(self, samples):
results = super().collater(samples)
if results is None:
return results
for key in samples[0][2][0]:
if key == 'path':
results[key] = [[d[key] for d in s[2]] for s in samples]
elif samples[0][2][0][key] is not None:
results[key] = torch.from_numpy(
np.array([[d[key] for d in s[2]] for s in samples])
)
results['colors'] = results['colors'].transpose(2, 3)
if results.get('full_rgb', None) is not None:
results['full_rgb'] = results['full_rgb'].transpose(2, 3)
return results
class ShapeViewStreamDataset(BaseWrapperDataset):
"""
Different from ShapeViewDataset.
We merge all the views together into one dataset regardless of the shapes.
    ** HACK **: an alternative to the ShapeViewDataset
"""
def __init__(self, dataset):
super().__init__(dataset)
        # the wrapped dataset is expected to use repeat == 1 and num_view == 1;
        # note the two comparisons below are no-op checks, not assertions
        self.dataset.repeat == 1
        self.dataset.num_view == 1
self.total_num_shape = dataset.total_num_shape
# reset the data_index
self.dataset.data_index = []
for i, d in enumerate(self.data):
for j, _ in enumerate(d['rgb']):
self.dataset.data_index.append((i, j)) # shape i, view j
def __len__(self):
return len(self.dataset.data_index)
def ordered_indices(self):
return np.arange(len(self))
@property
def cache(self):
return self.dataset.cache
@property
def data(self):
return self.dataset.data
def _load_batch(self, data, shape_id, view_ids):
return shape_id, self.dataset._load_shape(data[shape_id]), [self.dataset._load_view(data[shape_id], view_id) for view_id in view_ids]
def __getitem__(self, index):
shape_id, view_id = self.dataset.data_index[index]
if self.cache is not None:
return copy.deepcopy(self.cache[shape_id % self.total_num_shape][0]), \
copy.deepcopy(self.cache[shape_id % self.total_num_shape][1]), \
[copy.deepcopy(self.cache[shape_id % self.total_num_shape][2][view_id])]
return self._load_batch(self.data, shape_id, [view_id])
def _load_binary(self, id, views, phase='train'):
root = os.path.dirname(self.data[id]['ixt'])
npzfile = os.path.join(root, '{}.npz'.format(phase))
try:
with np.load(npzfile, allow_pickle=True) as f:
return f['cache']
except Exception:
caches = [self._load_batch(self.data, id, view_id) for view_id in views]
cache = [caches[0][0], caches[0][1], [caches[i][2][0] for i in range(len(views))]]
if data_utils.get_rank() == 0:
np.savez(npzfile, cache=cache)
return cache
class SampledPixelDataset(BaseWrapperDataset):
"""
    A wrapper dataset which splits rendered images into pixels
"""
def __init__(self,
dataset,
num_sample=None,
sampling_on_mask=1.0,
sampling_on_bbox=False,
sampling_at_center=1.0,
resolution=512,
patch_size=1):
super().__init__(dataset)
self.num_sample = num_sample
self.sampling_on_mask = sampling_on_mask
self.sampling_on_bbox = sampling_on_bbox
self.sampling_at_center = sampling_at_center
self.patch_size = patch_size
self.res = resolution
def __getitem__(self, index):
index, data_per_shape, data_per_view = self.dataset[index]
# sample pixels from the original images
sample_index = [
data_utils.sample_pixel_from_image(
data['alpha'].shape[-1],
self.num_sample,
data.get('mask', None)
if data.get('mask', None) is not None
else data.get('alpha', None),
self.sampling_on_mask,
self.sampling_on_bbox,
self.sampling_at_center,
width=int(data['size'][1]),
patch_size=self.patch_size)
for data in data_per_view
]
for i, data in enumerate(data_per_view):
data_per_view[i]['full_rgb'] = copy.deepcopy(data['colors'])
for key in data:
if data[key] is not None \
and (key != 'extrinsics' and key != 'view' and key != 'full_rgb') \
and data[key].shape[-1] > self.num_sample:
if len(data[key].shape) == 2:
data_per_view[i][key] = data[key][:, sample_index[i]]
else:
data_per_view[i][key] = data[key][sample_index[i]]
data_per_view[i]['index'] = sample_index[i]
return index, data_per_shape, data_per_view
def num_tokens(self, index):
return self.dataset.num_view * self.num_sample
class WorldCoordDataset(BaseWrapperDataset):
"""
    A wrapper dataset that transforms UV coordinates into world-space rays
"""
def __getitem__(self, index):
index, data_per_shape, data_per_view = self.dataset[index]
def camera2world(data):
inv_RT = data['extrinsics']
intrinsics = data_per_shape['intrinsics']
# get camera center (XYZ)
ray_start = inv_RT[:3, 3]
# get points at a random depth (=1)
ray_dir = geometry.get_ray_direction(
ray_start, data['uv'], intrinsics, inv_RT, 1
)
# here we still keep the original data for tracking purpose
data.update({
'ray_start': ray_start,
'ray_dir': ray_dir,
})
return data
return index, data_per_shape, [camera2world(data) for data in data_per_view]
def collater(self, samples):
results = self.dataset.collater(samples)
if results is None:
return results
results['ray_start'] = results['ray_start'].unsqueeze(-2)
results['ray_dir'] = results['ray_dir'].transpose(2, 3)
results['colors'] = results['colors'].transpose(2, 3)
if results.get('full_rgb', None) is not None:
results['full_rgb'] = results['full_rgb'].transpose(2, 3)
return results
class InfiniteDataset(BaseWrapperDataset):
"""
    A wrapper dataset which supports infinite sampling from a dataset.
No epochs in this case.
"""
def __init__(self, dataset, max_len=1000000):
super().__init__(dataset)
self.MAXLEN = max_len
def __len__(self):
return self.MAXLEN
def ordered_indices(self):
return np.arange(self.MAXLEN)
def __getitem__(self, index):
actual_length = len(self.dataset)
return self.dataset[index % actual_length]
| 20,801 | 36.821818 | 141 |
py
|
NSVF
|
NSVF-main/fairnr/data/geometry.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn.functional as F
from fairnr.data import data_utils as D
try:
from fairnr.clib._ext import build_octree
except ImportError:
pass
INF = 1000.0
def ones_like(x):
T = torch if isinstance(x, torch.Tensor) else np
return T.ones_like(x)
def stack(x):
T = torch if isinstance(x[0], torch.Tensor) else np
return T.stack(x)
def matmul(x, y):
T = torch if isinstance(x, torch.Tensor) else np
return T.matmul(x, y)
def cross(x, y, axis=0):
T = torch if isinstance(x, torch.Tensor) else np
return T.cross(x, y, axis)
def cat(x, axis=1):
if isinstance(x[0], torch.Tensor):
return torch.cat(x, dim=axis)
return np.concatenate(x, axis=axis)
def normalize(x, axis=-1, order=2):
if isinstance(x, torch.Tensor):
l2 = x.norm(p=order, dim=axis, keepdim=True)
return x / (l2 + 1e-8), l2
else:
l2 = np.linalg.norm(x, order, axis)
l2 = np.expand_dims(l2, axis)
l2[l2==0] = 1
return x / l2, l2
def parse_extrinsics(extrinsics, world2camera=True):
""" this function is only for numpy for now"""
if extrinsics.shape[0] == 3 and extrinsics.shape[1] == 4:
extrinsics = np.vstack([extrinsics, np.array([[0, 0, 0, 1.0]])])
if extrinsics.shape[0] == 1 and extrinsics.shape[1] == 16:
extrinsics = extrinsics.reshape(4, 4)
if world2camera:
extrinsics = np.linalg.inv(extrinsics).astype(np.float32)
return extrinsics
def parse_intrinsics(intrinsics):
fx = intrinsics[0, 0]
fy = intrinsics[1, 1]
cx = intrinsics[0, 2]
cy = intrinsics[1, 2]
return fx, fy, cx, cy
def uv2cam(uv, z, intrinsics, homogeneous=False):
fx, fy, cx, cy = parse_intrinsics(intrinsics)
x_lift = (uv[0] - cx) / fx * z
y_lift = (uv[1] - cy) / fy * z
z_lift = ones_like(x_lift) * z
if homogeneous:
return stack([x_lift, y_lift, z_lift, ones_like(z_lift)])
else:
return stack([x_lift, y_lift, z_lift])
def cam2world(xyz_cam, inv_RT):
return matmul(inv_RT, xyz_cam)[:3]
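# --- Illustrative sketch (not part of the original file): unprojecting a pixel.
# Assuming a pinhole intrinsics matrix K and a 4x4 camera-to-world matrix
# inv_RT, both torch tensors, a pixel (u, v) at depth z maps to world
# coordinates as:
#
#     uv = torch.tensor([[u], [v]], dtype=torch.float32)   # 2 x 1
#     xyz_cam = uv2cam(uv, z, K, homogeneous=True)         # 4 x 1
#     xyz_world = cam2world(xyz_cam, inv_RT)               # 3 x 1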
def r6d2mat(d6: torch.Tensor) -> torch.Tensor:
"""
Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
using Gram--Schmidt orthogonalisation per Section B of [1].
Args:
d6: 6D rotation representation, of size (*, 6)
Returns:
batch of rotation matrices of size (*, 3, 3)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
a1, a2 = d6[..., :3], d6[..., 3:]
b1 = F.normalize(a1, dim=-1)
b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
b2 = F.normalize(b2, dim=-1)
b3 = torch.cross(b1, b2, dim=-1)
return torch.stack((b1, b2, b3), dim=-2)
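# --- Illustrative sketch (not part of the original file): any (non-degenerate)
# 6D vector maps to a valid rotation matrix, i.e. R @ R^T == I and det(R) == 1:
#
#     d6 = torch.randn(4, 6)
#     R = r6d2mat(d6)                                      # 4 x 3 x 3
#     eye = torch.matmul(R, R.transpose(-1, -2))
#     assert torch.allclose(eye, torch.eye(3).expand(4, 3, 3), atol=1e-5)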
def get_ray_direction(ray_start, uv, intrinsics, inv_RT, depths=None):
if depths is None:
depths = 1
rt_cam = uv2cam(uv, depths, intrinsics, True)
rt = cam2world(rt_cam, inv_RT)
ray_dir, _ = normalize(rt - ray_start[:, None], axis=0)
return ray_dir
def look_at_rotation(camera_position, at=None, up=None, inverse=False, cv=False):
"""
This function takes a vector 'camera_position' which specifies the location
of the camera in world coordinates and two vectors `at` and `up` which
indicate the position of the object and the up directions of the world
coordinate system respectively. The object is assumed to be centered at
the origin.
The output is a rotation matrix representing the transformation
from world coordinates -> view coordinates.
Input:
camera_position: 3
        at: 1 x 3 or N x 3, (0, 0, 0) by default
        up: 1 x 3 or N x 3, (0, 0, -1) by default
"""
if at is None:
at = torch.zeros_like(camera_position)
else:
at = torch.tensor(at).type_as(camera_position)
if up is None:
up = torch.zeros_like(camera_position)
up[2] = -1
else:
up = torch.tensor(up).type_as(camera_position)
z_axis = normalize(at - camera_position)[0]
x_axis = normalize(cross(up, z_axis))[0]
y_axis = normalize(cross(z_axis, x_axis))[0]
R = cat([x_axis[:, None], y_axis[:, None], z_axis[:, None]], axis=1)
return R
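# --- Illustrative sketch (not part of the original file): look_at_rotation
# stacks the camera's x/y/z axes (expressed in world coordinates) as columns.
# A camera on the +x axis looking at the origin:
#
#     cam = torch.tensor([2.0, 0.0, 0.0])
#     R = look_at_rotation(cam)       # 3 x 3; R[:, 2] is the view direction,
#                                     # here the unit vector (-1, 0, 0)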
def ray(ray_start, ray_dir, depths):
return ray_start + ray_dir * depths
def compute_normal_map(ray_start, ray_dir, depths, RT, width=512, proj=False):
# TODO:
    # this function is pytorch-only (for now)
wld_coords = ray(ray_start, ray_dir, depths.unsqueeze(-1)).transpose(0, 1)
cam_coords = matmul(RT[:3, :3], wld_coords) + RT[:3, 3].unsqueeze(-1)
cam_coords = D.unflatten_img(cam_coords, width)
# estimate local normal
shift_l = cam_coords[:, 2:, :]
shift_r = cam_coords[:, :-2, :]
shift_u = cam_coords[:, :, 2: ]
shift_d = cam_coords[:, :, :-2]
diff_hor = normalize(shift_r - shift_l, axis=0)[0][:, :, 1:-1]
diff_ver = normalize(shift_u - shift_d, axis=0)[0][:, 1:-1, :]
normal = cross(diff_hor, diff_ver)
_normal = normal.new_zeros(*cam_coords.size())
_normal[:, 1:-1, 1:-1] = normal
_normal = _normal.reshape(3, -1).transpose(0, 1)
# compute the projected color
if proj:
_normal = normalize(_normal, axis=1)[0]
wld_coords0 = ray(ray_start, ray_dir, 0).transpose(0, 1)
cam_coords0 = matmul(RT[:3, :3], wld_coords0) + RT[:3, 3].unsqueeze(-1)
cam_coords0 = D.unflatten_img(cam_coords0, width)
cam_raydir = normalize(cam_coords - cam_coords0, 0)[0].reshape(3, -1).transpose(0, 1)
proj_factor = (_normal * cam_raydir).sum(-1).abs() * 0.8 + 0.2
return proj_factor
return _normal
def trilinear_interp(p, q, point_feats):
weights = (p * q + (1 - p) * (1 - q)).prod(dim=-1, keepdim=True)
if point_feats.dim() == 2:
point_feats = point_feats.view(point_feats.size(0), 8, -1)
point_feats = (weights * point_feats).sum(1)
return point_feats
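# NOTE on trilinear_interp: as used in splitting_points below, `p` is the query
# point's fractional position inside the voxel and `q` enumerates the eight
# corner coordinates in {0, 1}^3, so the per-axis factor (p * q + (1 - p) * (1 - q))
# reduces to p for corners at 1 and (1 - p) for corners at 0, i.e. the standard
# trilinear interpolation weights.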
# helper functions for encoder
def padding_points(xs, pad):
if len(xs) == 1:
return xs[0].unsqueeze(0)
maxlen = max([x.size(0) for x in xs])
xt = xs[0].new_ones(len(xs), maxlen, xs[0].size(1)).fill_(pad)
for i in range(len(xs)):
xt[i, :xs[i].size(0)] = xs[i]
return xt
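# pruning_points removes voxels whose accumulated density corresponds to an
# occupancy alpha = 1 - exp(-score) below the threshold th. For depth > 0 the
# scores are pooled over groups of 8 ** depth sibling voxels, so a whole group
# is kept or pruned together; surviving points are re-padded with INF (and
# features with 0) so batches stay rectangular.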
def pruning_points(feats, points, scores, depth=0, th=0.5):
if depth > 0:
g = int(8 ** depth)
scores = scores.reshape(scores.size(0), -1, g).sum(-1, keepdim=True)
scores = scores.expand(*scores.size()[:2], g).reshape(scores.size(0), -1)
alpha = (1 - torch.exp(-scores)) > th
feats = [feats[i][alpha[i]] for i in range(alpha.size(0))]
points = [points[i][alpha[i]] for i in range(alpha.size(0))]
points = padding_points(points, INF)
feats = padding_points(feats, 0)
return feats, points
def offset_points(point_xyz, quarter_voxel=1, offset_only=False, bits=2):
c = torch.arange(1, 2 * bits, 2, device=point_xyz.device)
ox, oy, oz = torch.meshgrid([c, c, c])
offset = (torch.cat([
ox.reshape(-1, 1),
oy.reshape(-1, 1),
oz.reshape(-1, 1)], 1).type_as(point_xyz) - bits) / float(bits - 1)
if not offset_only:
return point_xyz.unsqueeze(1) + offset.unsqueeze(0).type_as(point_xyz) * quarter_voxel
return offset.type_as(point_xyz) * quarter_voxel
def discretize_points(voxel_points, voxel_size):
    # this function turns voxel centers/corners into integer indices
    # we assume all points already lie on the voxel grid (as real numbers)
minimal_voxel_point = voxel_points.min(dim=0, keepdim=True)[0]
voxel_indices = ((voxel_points - minimal_voxel_point) / voxel_size).round_().long() # float
residual = (voxel_points - voxel_indices.type_as(voxel_points) * voxel_size).mean(0, keepdim=True)
return voxel_indices, residual
def splitting_points(point_xyz, point_feats, values, half_voxel):
# generate new centers
quarter_voxel = half_voxel * .5
new_points = offset_points(point_xyz, quarter_voxel).reshape(-1, 3)
old_coords = discretize_points(point_xyz, quarter_voxel)[0]
new_coords = offset_points(old_coords).reshape(-1, 3)
new_keys0 = offset_points(new_coords).reshape(-1, 3)
# get unique keys and inverse indices (for original key0, where it maps to in keys)
new_keys, new_feats = torch.unique(new_keys0, dim=0, sorted=True, return_inverse=True)
new_keys_idx = new_feats.new_zeros(new_keys.size(0)).scatter_(
0, new_feats, torch.arange(new_keys0.size(0), device=new_feats.device) // 64)
# recompute key vectors using trilinear interpolation
new_feats = new_feats.reshape(-1, 8)
if values is not None:
p = (new_keys - old_coords[new_keys_idx]).type_as(point_xyz).unsqueeze(1) * .25 + 0.5 # (1/4 voxel size)
q = offset_points(p, .5, offset_only=True).unsqueeze(0) + 0.5 # BUG?
point_feats = point_feats[new_keys_idx]
point_feats = F.embedding(point_feats, values).view(point_feats.size(0), -1)
new_values = trilinear_interp(p, q, point_feats)
else:
new_values = None
return new_points, new_feats, new_values, new_keys
def expand_points(voxel_points, voxel_size):
_voxel_size = min([
torch.sqrt(((voxel_points[j:j+1] - voxel_points[j+1:]) ** 2).sum(-1).min())
for j in range(100)])
depth = int(np.round(torch.log2(_voxel_size / voxel_size)))
if depth > 0:
half_voxel = _voxel_size / 2.0
for _ in range(depth):
voxel_points = offset_points(voxel_points, half_voxel / 2.0).reshape(-1, 3)
half_voxel = half_voxel / 2.0
return voxel_points, depth
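# get_edge flags depth points that lie close to a voxel edge: `a` and `b` are
# the distances to the two nearest voxel corners, `c` is the voxel size, and
# Heron's formula gives the triangle area, so `h` is proportional to the
# distance from the point to the corner-to-corner segment; points with small h
# (relative to the voxel size) are treated as edge points.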
def get_edge(depth_pts, voxel_pts, voxel_size, th=0.05):
voxel_pts = offset_points(voxel_pts, voxel_size / 2.0)
diff_pts = (voxel_pts - depth_pts[:, None, :]).norm(dim=2)
ab = diff_pts.sort(dim=1)[0][:, :2]
a, b = ab[:, 0], ab[:, 1]
c = voxel_size
p = (ab.sum(-1) + c) / 2.0
h = (p * (p - a) * (p - b) * (p - c)) ** 0.5 / c
return h < (th * voxel_size)
# fill-in image
def fill_in(shape, hits, input, initial=1.0):
input_sizes = [k for k in input.size()]
if (len(input_sizes) == len(shape)) and \
all([shape[i] == input_sizes[i] for i in range(len(shape))]):
return input # shape is the same no need to fill
if isinstance(initial, torch.Tensor):
output = initial.expand(*shape)
else:
output = input.new_ones(*shape) * initial
if input is not None:
if len(shape) == 1:
return output.masked_scatter(hits, input)
return output.masked_scatter(hits.unsqueeze(-1).expand(*shape), input)
return output
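# --- Illustrative sketch (not part of the original file): fill_in scatters
# per-ray outputs back into a full-size buffer, writing `initial` wherever a
# ray missed the scene:
#
#     hits = torch.tensor([1, 0, 1, 1, 0, 1, 1, 0], dtype=torch.bool)
#     colors = torch.rand(5, 3)                            # hit rays only
#     image = fill_in((8, 3), hits, colors, initial=1.0)   # misses become 1.0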
def build_easy_octree(points, half_voxel):
coords, residual = discretize_points(points, half_voxel)
ranges = coords.max(0)[0] - coords.min(0)[0]
depths = torch.log2(ranges.max().float()).ceil_().long() - 1
center = (coords.max(0)[0] + coords.min(0)[0]) / 2
centers, children = build_octree(center, coords, int(depths))
centers = centers.float() * half_voxel + residual # transform back to float
return centers, children
def cartesian_to_spherical(xyz):
""" xyz: batch x 3
"""
r = xyz.norm(p=2, dim=-1)
theta = torch.atan2(xyz[:, :2].norm(p=2, dim=-1), xyz[:, 2])
phi = torch.atan2(xyz[:, 1], xyz[:, 0])
return torch.stack((r, theta, phi), 1)
def spherical_to_cartesian(rtp):
x = rtp[:, 0] * torch.sin(rtp[:, 1]) * torch.cos(rtp[:, 2])
y = rtp[:, 0] * torch.sin(rtp[:, 1]) * torch.sin(rtp[:, 2])
z = rtp[:, 0] * torch.cos(rtp[:, 1])
return torch.stack((x, y, z), 1)
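# --- Illustrative sketch (not part of the original file): the two conversions
# above are inverses of each other for non-zero points:
#
#     xyz = torch.randn(16, 3)
#     rtp = cartesian_to_spherical(xyz)
#     assert torch.allclose(spherical_to_cartesian(rtp), xyz, atol=1e-4)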
| 11,984 | 33.941691 | 112 |
py
|
NSVF
|
NSVF-main/fairnr/data/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .shape_dataset import (
ShapeDataset, ShapeViewDataset, ShapeViewStreamDataset,
SampledPixelDataset, WorldCoordDataset,
InfiniteDataset
)
__all__ = [
'ShapeDataset',
'ShapeViewDataset',
'ShapeViewStreamDataset',
'SampledPixelDataset',
'WorldCoordDataset',
]
| 474 | 24 | 65 |
py
|
NSVF
|
NSVF-main/fairnr/data/trajectory.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
TRAJECTORY_REGISTRY = {}
def register_traj(name):
def register_traj_fn(fn):
if name in TRAJECTORY_REGISTRY:
raise ValueError('Cannot register duplicate trajectory ({})'.format(name))
TRAJECTORY_REGISTRY[name] = fn
return fn
return register_traj_fn
def get_trajectory(name):
return TRAJECTORY_REGISTRY.get(name, None)
@register_traj('circle')
def circle(radius=3.5, h=0.0, axis='z', t0=0, r=1):
if axis == 'z':
return lambda t: [radius * np.cos(r * t+t0), radius * np.sin(r * t+t0), h]
elif axis == 'y':
return lambda t: [radius * np.cos(r * t+t0), h, radius * np.sin(r * t+t0)]
else:
return lambda t: [h, radius * np.cos(r * t+t0), radius * np.sin(r * t+t0)]
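# --- Illustrative sketch (not part of the original file): a registered
# trajectory returns a function of a scalar parameter t that yields a 3D
# camera position, e.g.
#
#     path_fn = get_trajectory('circle')(radius=2.0, h=0.5)
#     position = path_fn(np.pi / 4)   # [2 * cos(pi/4), 2 * sin(pi/4), 0.5]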
@register_traj('zoomin_circle')
def zoomin_circle(radius=3.5, h=0.0, axis='z', t0=0, r=1):
ra = lambda t: 0.1 + abs(4.0 - t * 2 / np.pi)
if axis == 'z':
return lambda t: [radius * ra(t) * np.cos(r * t+t0), radius * ra(t) * np.sin(r * t+t0), h]
elif axis == 'y':
return lambda t: [radius * ra(t) * np.cos(r * t+t0), h, radius * ra(t) * np.sin(r * t+t0)]
else:
return lambda t: [h, radius * (4.2 - t * 2 / np.pi) * np.cos(r * t+t0), radius * (4.2 - t * 2 / np.pi) * np.sin(r * t+t0)]
@register_traj('zoomin_line')
def zoomin_line(radius=3.5, h=0.0, axis='z', t0=0, r=1, min_r=0.0001, max_r=10, step_r=10):
ra = lambda t: min_r + (max_r - min_r) * t * 180 / np.pi / step_r
if axis == 'z':
return lambda t: [radius * ra(t) * np.cos(t0), radius * ra(t) * np.sin(t0), h * ra(t)]
elif axis == 'y':
return lambda t: [radius * ra(t) * np.cos(t0), h, radius * ra(t) * np.sin(t0)]
else:
return lambda t: [h, radius * (4.2 - t * 2 / np.pi) * np.cos(r * t+t0), radius * (4.2 - t * 2 / np.pi) * np.sin(r * t+t0)]
| 2,045 | 34.894737 | 130 |
py
|
NSVF
|
NSVF-main/fairnr/tasks/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
task_name = file[:file.find('.py')]
importlib.import_module('fairnr.tasks.' + task_name)
| 420 | 31.384615 | 65 |
py
|
NSVF
|
NSVF-main/fairnr/tasks/neural_rendering.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, copy
import json
import torch
import imageio
import numpy as np
from collections import defaultdict
from torchvision.utils import save_image
from argparse import Namespace
from fairseq.tasks import FairseqTask, register_task
from fairseq.optim.fp16_optimizer import FP16Optimizer
from fairseq.logging import progress_bar
from fairnr.data import (
ShapeViewDataset, SampledPixelDataset, ShapeViewStreamDataset,
WorldCoordDataset, ShapeDataset, InfiniteDataset
)
from fairnr.data.data_utils import write_images, recover_image, parse_views
from fairnr.data.geometry import ray, compute_normal_map
from fairnr.renderer import NeuralRenderer
from fairnr.data.trajectory import get_trajectory
from fairnr import ResetTrainerException
@register_task("single_object_rendering")
class SingleObjRenderingTask(FairseqTask):
"""
Task for remembering & rendering a single object.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser"""
parser.add_argument("data", help='data-path or data-directoy')
parser.add_argument("--object-id-path", type=str, help='path to object indices', default=None)
parser.add_argument("--no-preload", action="store_true")
parser.add_argument("--no-load-binary", action="store_true")
parser.add_argument("--load-depth", action="store_true",
help="load depth images if exists")
parser.add_argument("--transparent-background", type=str, default="1.0",
help="background color if the image is transparent")
parser.add_argument("--load-mask", action="store_true",
help="load pre-computed masks which is useful for subsampling during training.")
parser.add_argument("--train-views", type=str, default="0..50",
help="views sampled for training, you can set specific view id, or a range")
parser.add_argument("--valid-views", type=str, default="0..50",
help="views sampled for validation, you can set specific view id, or a range")
parser.add_argument("--test-views", type=str, default="0",
help="views sampled for rendering, only used for showing rendering results.")
parser.add_argument("--subsample-valid", type=int, default=-1,
help="if set > -1, subsample the validation (when training set is too large)")
parser.add_argument("--view-per-batch", type=int, default=6,
help="number of views training each batch (each GPU)")
parser.add_argument("--valid-view-per-batch", type=int, default=1,
help="number of views training each batch (each GPU)")
parser.add_argument("--view-resolution", type=str, default='64x64',
help="width for the squared image. downsampled from the original.")
parser.add_argument('--valid-view-resolution', type=str, default=None,
help="if not set, if valid view resolution will be train view resolution")
parser.add_argument("--min-color", choices=(0, -1), default=-1, type=int,
help="RGB range used in the model. conventionally used -1 ~ 1")
parser.add_argument("--virtual-epoch-steps", type=int, default=None,
help="virtual epoch used in Infinite Dataset. if None, set max-update")
parser.add_argument("--pruning-every-steps", type=int, default=None,
help="if the model supports pruning, prune unecessary voxels")
parser.add_argument("--half-voxel-size-at", type=str, default=None,
                            help='comma-separated update steps at which to halve the voxel size')
parser.add_argument("--reduce-step-size-at", type=str, default=None,
                            help='comma-separated update steps at which to reduce the ray-marching step size')
parser.add_argument("--prune-voxel-at", type=str, default=None,
                            help='comma-separated update steps at which to prune voxels')
parser.add_argument("--rendering-every-steps", type=int, default=None,
help="if set, enables rendering online with default parameters")
parser.add_argument("--rendering-args", type=str, metavar='JSON')
parser.add_argument("--pruning-th", type=float, default=0.5,
help="if larger than this, we choose keep the voxel.")
parser.add_argument("--pruning-with-train-stats", action='store_true',
help="if set, model will run over the training set statstics to prune voxels.")
parser.add_argument("--pruning-rerun-train-set", action='store_true',
help="only works when --pruning-with-train-stats is also set.")
parser.add_argument("--output-valid", type=str, default=None)
def __init__(self, args):
super().__init__(args)
self._trainer, self._dummy_batch = None, None
# check dataset
self.train_data = self.val_data = self.test_data = args.data
self.object_ids = None if args.object_id_path is None else \
{line.strip(): i for i, line in enumerate(open(args.object_id_path))}
self.output_valid = getattr(args, "output_valid", None)
if os.path.isdir(args.data):
if os.path.exists(args.data + '/train.txt'):
self.train_data = args.data + '/train.txt'
if os.path.exists(args.data + '/val.txt'):
self.val_data = args.data + '/val.txt'
if os.path.exists(args.data + '/test.txt'):
self.test_data = args.data + '/test.txt'
if self.object_ids is None and os.path.exists(args.data + '/object_ids.txt'):
self.object_ids = {line.strip(): i for i, line in enumerate(open(args.data + '/object_ids.txt'))}
if self.object_ids is not None:
self.ids_object = {self.object_ids[o]: o for o in self.object_ids}
else:
self.ids_object = {0: 'model'}
if len(self.args.tensorboard_logdir) > 0 and getattr(args, "distributed_rank", -1) == 0:
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(self.args.tensorboard_logdir + '/images')
else:
self.writer = None
self._num_updates = {'pv': -1, 'sv': -1, 'rs': -1, 're': -1}
self.pruning_every_steps = getattr(self.args, "pruning_every_steps", None)
self.pruning_th = getattr(self.args, "pruning_th", 0.5)
self.rendering_every_steps = getattr(self.args, "rendering_every_steps", None)
self.steps_to_half_voxels = getattr(self.args, "half_voxel_size_at", None)
self.steps_to_reduce_step = getattr(self.args, "reduce_step_size_at", None)
self.steps_to_prune_voxels = getattr(self.args, "prune_voxel_at", None)
if self.steps_to_half_voxels is not None:
self.steps_to_half_voxels = [int(s) for s in self.steps_to_half_voxels.split(',')]
if self.steps_to_reduce_step is not None:
self.steps_to_reduce_step = [int(s) for s in self.steps_to_reduce_step.split(',')]
if self.steps_to_prune_voxels is not None:
self.steps_to_prune_voxels = [int(s) for s in self.steps_to_prune_voxels.split(',')]
if self.rendering_every_steps is not None:
gen_args = {
'path': args.save_dir,
'render_beam': 1, 'render_resolution': '512x512',
'render_num_frames': 120, 'render_angular_speed': 3,
'render_output_types': ["rgb"], 'render_raymarching_steps': 10,
'render_at_vector': "(0,0,0)", 'render_up_vector': "(0,0,-1)",
'render_path_args': "{'radius': 1.5, 'h': 0.5}",
'render_path_style': 'circle', "render_output": None
}
gen_args.update(json.loads(getattr(args, 'rendering_args', '{}') or '{}'))
self.renderer = self.build_generator(Namespace(**gen_args))
else:
self.renderer = None
self.train_views = parse_views(args.train_views)
self.valid_views = parse_views(args.valid_views)
self.test_views = parse_views(args.test_views)
@classmethod
def setup_task(cls, args, **kwargs):
"""
Setup the task
"""
return cls(args)
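    # HACK: the training split is repeated once per GPU (see ShapeViewDataset),
    # so that after sharding across workers every GPU still iterates over all
    # shapes; validation and test splits are never repeated.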
def repeat_dataset(self, split):
return 1 if split != 'train' else self.args.distributed_world_size # IMPORTANT!
def load_dataset(self, split, **kwargs):
"""
Load a given dataset split (train, valid, test)
"""
self.datasets[split] = ShapeViewDataset(
self.train_data if split == 'train' else \
self.val_data if split == 'valid' else self.test_data,
views=self.train_views if split == 'train' else \
self.valid_views if split == 'valid' else self.test_views,
num_view=self.args.view_per_batch if split == 'train' else \
self.args.valid_view_per_batch if split == 'valid' else 1,
resolution=self.args.view_resolution if split == 'train' else \
getattr(self.args, "valid_view_resolution", self.args.view_resolution) if split == 'valid' else \
getattr(self.args, "render_resolution", self.args.view_resolution),
subsample_valid=self.args.subsample_valid if split == 'valid' else -1,
train=(split=='train'),
load_depth=self.args.load_depth and (split!='test'),
load_mask=self.args.load_mask and (split!='test'),
repeat=self.repeat_dataset(split),
preload=(not getattr(self.args, "no_preload", False)) and (split!='test'),
binarize=(not getattr(self.args, "no_load_binary", False)) and (split!='test'),
bg_color=getattr(self.args, "transparent_background", "1,1,1"),
min_color=getattr(self.args, "min_color", -1),
ids=self.object_ids
)
if split == 'train':
max_step = getattr(self.args, "virtual_epoch_steps", None)
if max_step is not None:
total_num_models = max_step * self.args.distributed_world_size * self.args.max_sentences
else:
total_num_models = 10000000
if getattr(self.args, "pruning_rerun_train_set", False):
self._unique_trainset = ShapeViewStreamDataset(copy.deepcopy(self.datasets[split])) # backup
self._unique_trainitr = self.get_batch_iterator(
self._unique_trainset, max_sentences=self.args.max_sentences_valid, seed=self.args.seed,
num_shards=self.args.distributed_world_size, shard_id=self.args.distributed_rank,
num_workers=self.args.num_workers)
self.datasets[split] = InfiniteDataset(self.datasets[split], total_num_models)
if split == 'valid':
self.datasets[split] = ShapeViewStreamDataset(self.datasets[split])
def build_generator(self, args):
"""
build a neural renderer for visualization
"""
return NeuralRenderer(
beam=args.render_beam,
resolution=args.render_resolution,
frames=args.render_num_frames,
speed=args.render_angular_speed,
raymarching_steps=args.render_raymarching_steps,
path_gen=get_trajectory(args.render_path_style)(
**eval(args.render_path_args)
),
at=eval(args.render_at_vector),
up=eval(args.render_up_vector),
fps=getattr(args, "render_save_fps", 24),
output_dir=args.render_output if args.render_output is not None
else os.path.join(args.path, "output"),
output_type=args.render_output_types,
test_camera_poses=getattr(args, "render_camera_poses", None),
test_camera_intrinsics=getattr(args, "render_camera_intrinsics", None),
test_camera_views=getattr(args, "render_views", None)
)
def setup_trainer(self, trainer):
# give the task ability to access the global trainer functions
self._trainer = trainer
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return None
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return None
def update_step(self, num_updates, name='re'):
"""Task level update when number of updates increases.
This is called after the optimization step and learning rate
update at each iteration.
"""
self._num_updates[name] = num_updates
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
if (((self.pruning_every_steps is not None) and \
(update_num % self.pruning_every_steps == 0) and \
(update_num > 0)) or \
((self.steps_to_prune_voxels is not None) and \
update_num in self.steps_to_prune_voxels) \
) and \
(update_num > self._num_updates['pv']) and \
hasattr(model, 'prune_voxels'):
model.eval()
if getattr(self.args, "pruning_rerun_train_set", False):
with torch.no_grad():
model.clean_caches(reset=True)
progress = progress_bar.progress_bar(
self._unique_trainitr.next_epoch_itr(shuffle=False),
prefix=f"pruning based statiscs over training set",
tensorboard_logdir=None,
default_log_format=self.args.log_format if self.args.log_format is not None else "tqdm")
for step, inner_sample in enumerate(progress):
outs = model(**self._trainer._prepare_sample(self.filter_dummy(inner_sample)))
progress.log(stats=outs['other_logs'], tag='track', step=step)
model.prune_voxels(self.pruning_th, train_stats=getattr(self.args, "pruning_with_train_stats", False))
self.update_step(update_num, 'pv')
if self.steps_to_half_voxels is not None and \
(update_num in self.steps_to_half_voxels) and \
(update_num > self._num_updates['sv']):
model.split_voxels()
self.update_step(update_num, 'sv')
raise ResetTrainerException
if self.rendering_every_steps is not None and \
(update_num % self.rendering_every_steps == 0) and \
(update_num > 0) and \
self.renderer is not None and \
(update_num > self._num_updates['re']):
sample_clone = {key: sample[key].clone() if sample[key] is not None else None for key in sample }
outputs = self.inference_step(self.renderer, [model], [sample_clone, 0])[1]
if getattr(self.args, "distributed_rank", -1) == 0: # save only for master
self.renderer.save_images(outputs, update_num)
self.steps_to_half_voxels = [a for a in self.steps_to_half_voxels if a != update_num]
if self.steps_to_reduce_step is not None and \
update_num in self.steps_to_reduce_step and \
(update_num > self._num_updates['rs']):
model.reduce_stepsize()
self.update_step(update_num, 'rs')
self.update_step(update_num, 'step')
return super().train_step(sample, model, criterion, optimizer, update_num, ignore_grad)
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
model.add_eval_scores(logging_output, sample, model.cache, criterion, outdir=self.output_valid)
if self.writer is not None:
images = model.visualize(sample, shape=0, view=0)
if images is not None:
write_images(self.writer, images, self._num_updates['step'])
return loss, sample_size, logging_output
def save_image(self, img, id, view, group='gt'):
object_name = self.ids_object[id.item()]
def _mkdir(x):
if not os.path.exists(x):
os.mkdir(x)
_mkdir(self.output_valid)
_mkdir(os.path.join(self.output_valid, group))
_mkdir(os.path.join(self.output_valid, group, object_name))
imageio.imsave(os.path.join(
self.output_valid, group, object_name,
'{:04d}.png'.format(view)),
(img * 255).astype(np.uint8))
def filter_dummy(self, sample):
if self._dummy_batch is None:
self._dummy_batch = sample
if sample is None:
sample = self._dummy_batch
return sample
| 17,291 | 50.159763 | 114 |
py
|
NSVF
|
NSVF-main/fairnr_cli/render.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is a simplified copy of fairseq-generate for other uses.
"""
import logging
import math
import os
import sys
import time
import torch
import imageio
import numpy as np
from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.meters import StopwatchMeter, TimeMeter
from fairnr import options
def main(args):
assert args.path is not None, '--path required for generation!'
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok=True)
output_path = os.path.join(args.results_path, 'generate-{}.txt'.format(args.gen_subset))
with open(output_path, 'w', buffering=1) as h:
return _main(args, h)
else:
return _main(args, sys.stdout)
def _main(args, output_file):
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=output_file,
)
logger = logging.getLogger('fairnr_cli.render')
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(os.pathsep),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(args)
    output_files, step = [], 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for i, sample in enumerate(t):
sample = utils.move_to_cuda(sample) if use_cuda else sample
gen_timer.start()
step, _output_files = task.inference_step(generator, models, [sample, step])
output_files += _output_files
gen_timer.stop(500)
wps_meter.update(500)
t.log({'wps': round(wps_meter.avg)})
break
# if i > 5:
# break
generator.save_images(output_files, combine_output=args.render_combine_output)
def cli_main():
parser = options.get_rendering_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
| 3,570 | 28.03252 | 96 |
py
|
NSVF
|
NSVF-main/fairnr_cli/render_multigpu.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is a simplified copy of fairseq-generate for other uses.
"""
import logging
import math
import os
import sys
import time
import torch
import imageio
import numpy as np
from fairseq import checkpoint_utils, progress_bar, tasks, utils, distributed_utils
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.options import add_distributed_training_args
from fairnr import options
def main(args, *kwargs):
assert args.path is not None, '--path required for generation!'
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok=True)
output_path = os.path.join(args.results_path, 'generate-{}.txt'.format(args.gen_subset))
with open(output_path, 'w', buffering=1) as h:
return _main(args, h)
else:
return _main(args, sys.stdout)
def _main(args, output_file):
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=output_file,
)
logger = logging.getLogger('fairnr_cli.render')
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(os.pathsep),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
if args.fp16:
model.half()
if use_cuda:
model.cuda()
logging.info(model)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_workers=args.num_workers
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(args)
shard_id, world_size = args.distributed_rank, args.distributed_world_size
output_files = []
if generator.test_poses is not None:
total_frames = generator.test_poses.shape[0]
_frames = int(np.floor(total_frames / world_size))
step = shard_id * _frames
frames = _frames if shard_id < (world_size - 1) else total_frames - step
else:
step = shard_id * args.render_num_frames
frames = args.render_num_frames
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for i, sample in enumerate(t):
sample = utils.move_to_cuda(sample) if use_cuda else sample
gen_timer.start()
step, _output_files = task.inference_step(
generator, models, [sample, step, frames])
output_files += _output_files
gen_timer.stop(500)
wps_meter.update(500)
t.log({'wps': round(wps_meter.avg)})
timestamp = generator.save_images(
output_files, steps='shard{}'.format(shard_id), combine_output=args.render_combine_output)
# join videos from all GPUs and delete temp files
try:
timestamps = distributed_utils.all_gather_list(timestamp)
except:
timestamps = [timestamp]
if shard_id == 0:
generator.merge_videos(timestamps)
def cli_main():
parser = options.get_rendering_parser()
add_distributed_training_args(parser)
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == '__main__':
cli_main()
| 4,399 | 29.985915 | 98 |
py
|
NSVF
|
NSVF-main/fairnr_cli/validate.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
import numpy as np
import torch
from itertools import chain
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.logging import metrics, progress_bar
from fairseq.options import add_distributed_training_args
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairnr_cli.validate')
def main(args, override_args=None):
utils.import_user_module(args)
assert args.max_tokens is not None or args.max_sentences is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
use_fp16 = args.fp16
use_cuda = torch.cuda.is_available() and not args.cpu
if override_args is not None:
try:
override_args = override_args['override_args']
except TypeError:
override_args = override_args
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
else:
overrides = None
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
arg_overrides=overrides,
suffix=getattr(args, "checkpoint_suffix", ""),
)
model = models[0]
# Move models to GPU
for model in models:
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(model_args)
# Build criterion
criterion = task.build_criterion(model_args)
if use_fp16:
criterion.half()
if use_cuda:
criterion.cuda()
criterion.eval()
for subset in args.valid_subset.split(','):
try:
task.load_dataset(subset, combine=False, epoch=1)
dataset = task.dataset(subset)
except KeyError:
raise Exception('Cannot find dataset: ' + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_workers=args.num_workers,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
sample = utils.apply_to_sample(
lambda t: t.half() if t.dtype is torch.float32 else t, sample) if use_fp16 else sample
try:
with torch.no_grad(): # do not save backward passes
max_num_rays = 900 * 900
if sample['uv'].shape[3] > max_num_rays:
sample['ray_split'] = sample['uv'].shape[3] // max_num_rays
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
except TypeError:
break
with metrics.aggregate() as agg:
task.reduce_metrics(log_outputs, criterion)
log_output = agg.get_smoothed_values()
# summarize all the gpus
if args.distributed_world_size > 1:
all_log_output = list(zip(*distributed_utils.all_gather_list([log_output])))[0]
log_output = {
key: np.mean([log[key] for log in all_log_output])
for key in all_log_output[0]
}
progress.print(log_output, tag=subset, step=i)
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
# support multi-gpu validation, use all available gpus
default_world_size = max(1, torch.cuda.device_count())
if args.distributed_world_size < default_world_size:
args.distributed_world_size = default_world_size
override_args.distributed_world_size = default_world_size
distributed_utils.call_main(args, main, override_args=override_args)
if __name__ == '__main__':
cli_main()
| 5,384 | 33.082278 | 102 |
py
|
NSVF
|
NSVF-main/fairnr_cli/extract.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This code is used for extracting voxels/meshes from the learned model
"""
import logging
import numpy as np
import torch
import sys, os
import argparse
from fairseq import options
from fairseq import checkpoint_utils
from plyfile import PlyData, PlyElement
def main(args):
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairnr_cli.extract')
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path], suffix=getattr(args, "checkpoint_suffix", ""))
model = models[0]
if use_cuda:
model.cuda()
if args.format == 'mc_mesh':
plydata = model.encoder.export_surfaces(
model.field, th=args.mc_threshold,
bits=2 * args.mc_num_samples_per_halfvoxel)
elif args.format == 'voxel_center':
plydata = model.encoder.export_voxels(False)
elif args.format == 'voxel_mesh':
plydata = model.encoder.export_voxels(True)
else:
raise NotImplementedError
# write to ply file.
if not os.path.exists(args.output):
os.makedirs(args.output)
plydata.text = args.savetext
plydata.write(open(os.path.join(args.output, args.name + '.ply'), 'wb'))
def cli_main():
parser = argparse.ArgumentParser(description='Extract geometry from a trained model (only for learnable embeddings).')
parser.add_argument('--path', type=str, required=True)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--name', type=str, default='sparsevoxel')
parser.add_argument('--format', type=str, choices=['voxel_center', 'voxel_mesh', 'mc_mesh'])
parser.add_argument('--savetext', action='store_true', help='save .ply in plain text')
parser.add_argument('--mc-num-samples-per-halfvoxel', type=int, default=8,
help="""the number of point samples every half voxel-size for marching cube.
For instance, by setting to 8, it will use (8 x 2) ^ 3 = 4096 points to compute density for each voxel.
                        In practice, the larger this number is, the more accurate the surface you get.
""")
parser.add_argument('--mc-threshold', type=float, default=0.5,
help="""the threshold used to find the isosurface from the learned implicit field.
In our implementation, we define our values as ``1 - exp(-max(0, density))``
where "0" is empty and "1" is fully occupied.
""")
parser.add_argument('--user-dir', default='fairnr')
parser.add_argument('--cpu', action='store_true')
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
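# --- Illustrative invocation (assumed script path, not part of the original file):
#
#     python -u fairnr_cli/extract.py \
#         --path checkpoints/model.pt --output outputs --name scene \
#         --format mc_mesh --mc-threshold 0.5 --mc-num-samples-per-halfvoxel 8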
| 3,180 | 38.7625 | 127 |
py
|
NSVF
|
NSVF-main/fairnr_cli/launch_slurm.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random, shlex
import os, sys, subprocess
def launch_cluster(slurm_args, model_args):
# prepare
jobname = slurm_args.get('job-name', 'test')
train_log = slurm_args.get('output', None)
train_stderr = slurm_args.get('error', None)
nodes, gpus = slurm_args.get('nodes', 1), slurm_args.get('gpus', 8)
if not slurm_args.get('local', False):
assert (train_log is not None) and (train_stderr is not None)
# parse slurm
train_cmd = ['python', 'train.py', ]
train_cmd.extend(['--distributed-world-size', str(nodes * gpus)])
if nodes > 1:
train_cmd.extend(['--distributed-port', str(get_random_port())])
train_cmd += model_args
base_srun_cmd = [
'srun',
'--job-name', jobname,
'--output', train_log,
'--error', train_stderr,
'--open-mode', 'append',
'--unbuffered',
]
srun_cmd = base_srun_cmd + train_cmd
srun_cmd_str = ' '.join(map(shlex.quote, srun_cmd))
srun_cmd_str = srun_cmd_str + ' &'
sbatch_cmd = [
'sbatch',
'--job-name', jobname,
'--partition', slurm_args.get('partition', 'learnfair'),
'--gres', 'gpu:volta:{}'.format(gpus),
'--nodes', str(nodes),
'--ntasks-per-node', '1',
'--cpus-per-task', '48',
'--output', train_log,
'--error', train_stderr,
'--open-mode', 'append',
'--signal', 'B:USR1@180',
'--time', slurm_args.get('time', '4320'),
'--mem', slurm_args.get('mem', '500gb'),
'--exclusive',
]
if 'constraint' in slurm_args:
sbatch_cmd += ['-C', slurm_args.get('constraint')]
if 'comment' in slurm_args:
sbatch_cmd += ['--comment', slurm_args.get('comment')]
wrapped_cmd = requeue_support() + '\n' + srun_cmd_str + ' \n wait $! \n sleep 610 & \n wait $!'
sbatch_cmd += ['--wrap', wrapped_cmd]
sbatch_cmd_str = ' '.join(map(shlex.quote, sbatch_cmd))
# start training
env = os.environ.copy()
env['OMP_NUM_THREADS'] = '2'
if env.get('SLURM_ARGS', None) is not None:
del env['SLURM_ARGS']
if nodes > 1:
env['NCCL_SOCKET_IFNAME'] = '^docker0,lo'
env['NCCL_DEBUG'] = 'INFO'
if slurm_args.get('dry-run', False):
print(sbatch_cmd_str)
elif slurm_args.get('local', False):
assert nodes == 1, 'distributed training cannot be combined with local'
if 'CUDA_VISIBLE_DEVICES' not in env:
env['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, range(gpus)))
env['NCCL_DEBUG'] = 'INFO'
if train_log is not None:
train_proc = subprocess.Popen(train_cmd, env=env, stdout=subprocess.PIPE)
tee_proc = subprocess.Popen(['tee', '-a', train_log], stdin=train_proc.stdout)
train_proc.stdout.close()
train_proc.wait()
tee_proc.wait()
else:
train_proc = subprocess.Popen(train_cmd, env=env)
train_proc.wait()
else:
with open(train_log, 'a') as train_log_h:
print(f'running command: {sbatch_cmd_str}\n')
with subprocess.Popen(sbatch_cmd, stdout=subprocess.PIPE, env=env) as train_proc:
stdout = train_proc.stdout.read().decode('utf-8')
print(stdout, file=train_log_h)
try:
job_id = int(stdout.rstrip().split()[-1])
return job_id
except IndexError:
return None
def launch(slurm_args, model_args):
job_id = launch_cluster(slurm_args, model_args)
if job_id is not None:
print('Launched {}'.format(job_id))
else:
print('Failed.')
def requeue_support():
return """
trap_handler () {
echo "Caught signal: " $1
# SIGTERM must be bypassed
if [ "$1" = "TERM" ]; then
echo "bypass sigterm"
else
# Submit a new job to the queue
echo "Requeuing " $SLURM_JOB_ID
scontrol requeue $SLURM_JOB_ID
fi
}
# Install signal handler
trap 'trap_handler USR1' USR1
trap 'trap_handler TERM' TERM
"""
def get_random_port():
old_state = random.getstate()
random.seed()
port = random.randint(10000, 20000)
random.setstate(old_state)
return port
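# A minimal usage sketch (hypothetical; the dictionary keys mirror the slurm_args lookups in
# launch_cluster above, and the values/model flags are placeholders):
#   slurm_args = {'job-name': 'nsvf', 'output': 'train.log', 'error': 'train.stderr.log',
#                 'nodes': 1, 'gpus': 8, 'partition': 'learnfair', 'dry-run': True}
#   launch(slurm_args, ['--user-dir', 'fairnr'])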
| 4,727 | 32.771429 | 99 |
py
|
NSVF
|
NSVF-main/fairnr_cli/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 177 | 34.6 | 65 |
py
|
NSVF
|
NSVF-main/fairnr_cli/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
This file is mostly copied from the original fairseq code
"""
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.trainer import Trainer
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairnr import ResetTrainerException
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairnr_cli.train')
def main(args, init_distributed=False):
utils.import_user_module(args)
assert args.max_tokens is not None or args.max_sentences is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
metrics.reset()
# Initialize CUDA and distributed training
if torch.cuda.is_available() and not args.cpu:
torch.cuda.set_device(args.device_id)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if init_distributed:
args.distributed_rank = distributed_utils.distributed_init(args)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(','):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info('model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
logger.info('num. model params: {} (num. trained: {})'.format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
))
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion)
else:
trainer = MegatronTrainer(args, task, model, criterion)
task.setup_trainer(trainer)
logger.info('training on {} GPUs'.format(args.distributed_world_size))
logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(
args.max_tokens,
args.max_sentences,
))
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
valid_subsets = args.valid_subset.split(',')
while (
lr > args.min_lr
and epoch_itr.next_epoch_idx <= max_epoch
):
# train for one epoch
should_end_training = train(args, trainer, task, epoch_itr)
valid_losses = validate_and_save(args, trainer, task, epoch_itr, valid_subsets)
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=(os.pathsep in getattr(args, 'data', '')),
)
if should_end_training:
break
train_meter.stop()
logger.info('done training in {:.1f} seconds'.format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
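# Note (added for clarity): the best validation value seen so far is cached as attributes on the
# function object itself (should_stop_early.best / should_stop_early.num_runs).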
prev_best = getattr(should_stop_early, 'best', None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info('early stop since valid performance hasn\'t improved for last {} runs'.format(args.patience))
return True
else:
return False
@metrics.aggregate('train')
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
)
# task specific setup per epoch
task.begin_epoch(epoch_itr.epoch, trainer.get_model())
valid_subsets = args.valid_subset.split(',')
max_update = args.max_update or math.inf
should_end_training = False
for samples in progress:
with metrics.aggregate('train_inner'):
try:
log_output = trainer.train_step(samples)
except ResetTrainerException:
trainer._wrapped_criterion = None
trainer._wrapped_model = None
trainer._optimizer = None
logger.info("reset the trainer at {}".format(trainer.get_num_updates()))
log_output = trainer.train_step(samples)
if log_output is None: # OOM, overflow, ...
continue
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values('train_inner'))
progress.log(stats, tag='train_inner', step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters('train_inner')
valid_losses = validate_and_save(args, trainer, task, epoch_itr, valid_subsets)
if should_stop_early(args, valid_losses[0]) or num_updates >= max_update:
should_end_training = True
break
# log end-of-epoch stats
stats = get_training_stats(metrics.get_smoothed_values('train'))
progress.print(stats, tag='train', step=num_updates)
# reset epoch-level meters
metrics.reset_meters('train')
return should_end_training
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets):
num_updates = trainer.get_num_updates()
do_save = (
(
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
)
or (
epoch_itr.end_of_epoch()
and epoch_itr.epoch % args.save_interval == 0
)
)
do_validate = (
(
do_save # saving requires validation
or (
epoch_itr.end_of_epoch()
and epoch_itr.epoch % args.validate_interval == 0
)
)
and not args.disable_validation
)
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Save
if do_save:
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses
def get_training_stats(stats):
if 'nll_loss' in stats and 'ppl' not in stats:
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
stats['wall'] = round(metrics.get_meter('default', 'wall').elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
# reset dummy batch only for validation
trainer._dummy_batch = "DUMMY" # reset dummy batch
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args.max_tokens_valid,
max_sentences=args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for step, sample in enumerate(progress):
trainer.valid_step(sample)
stats = get_training_stats(agg.get_smoothed_values())
plog = progress.log
if hasattr(progress, "wrapped_bar"):
plog = progress.wrapped_bar.log
plog(stats, tag='valid', step=step)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
# reset dummy batch again for continuing training
trainer._dummy_batch = "DUMMY"
return valid_losses
def get_valid_stats(args, trainer, stats):
if 'nll_loss' in stats and 'ppl' not in stats:
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
stats['num_updates'] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, 'best'):
key = 'best_{0}'.format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[args.best_checkpoint_metric],
)
return stats
def distributed_main(i, args, start_rank=0):
args.device_id = i
if args.distributed_rank is None: # torch.multiprocessing.spawn
args.distributed_rank = start_rank + i
main(args, init_distributed=True)
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.distributed_init_method is None:
distributed_utils.infer_init_method(args)
if args.distributed_init_method is not None:
# distributed training
if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
start_rank = args.distributed_rank
args.distributed_rank = None # assign automatically
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, start_rank),
nprocs=torch.cuda.device_count(),
)
else:
distributed_main(args.device_id, args)
elif args.distributed_world_size > 1:
# fallback for single node with multiple GPUs
assert args.distributed_world_size <= torch.cuda.device_count()
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
args.distributed_rank = None # set based on device id
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, ),
nprocs=args.distributed_world_size,
)
else:
# single GPU training
main(args)
if __name__ == '__main__':
cli_main()
| 13,414 | 34.489418 | 117 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite1/generate.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
T=10
s=10
save1=randint(s,size=(T,s))
save2=randint(s,size=(T,s))
np.savetxt("mdp/S1",save1)
np.savetxt("mdp/S2",save2)
| 297 | 17.625 | 33 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite1/finite.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
import os
from multiprocessing import Process
#generate MDP
T=100
size=10
nS=size*size
nA=4
alpha=20
N=50000 #no of episodes
P=np.zeros((T,nS,nA,nS))
R=np.zeros((T,nS,nA,nS))
G=np.zeros((T,nS,nA,nS))
save1=np.loadtxt("mdp/S1").astype(int)
save2=np.loadtxt("mdp/S2").astype(int)
def get(a,b):
return (a%size)*size+b%size
def put(state):
return (state//size,state%size)
#Transition matrix
for j in range(nS):
a,b=put(j)
indices=[get(a,b+1),get(a,b-1),get(a+1,b),get(a-1,b)]
P[0,j,:,indices]=3
for k in range(nA):
P[0,j,k,indices[k]]=81
P[:,j,k]=P[0,j,k]/np.sum(P[0,j,k])
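# Note (added for clarity): after normalization each action moves to its intended neighbour with
# probability 81/90 = 0.9 and to each of the other three neighbours with probability 3/90 each.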
#Reward matrix
for ti in range(10):
for i in range(size):
b=save1[ti,i]
index=get(i,b)
R[ti*10:(ti+1)*10,:,:,index]=10
#Constraint cost matrix
for ti in range(10):
for i in range(size):
b=save2[ti,i]
index=[get(i,b)]
G[ti*10:(ti+1)*10,:,:,index]=10
RT=np.zeros(nS)
GT=np.zeros(nS)
ff=5
#state feature
def feat(state):
res=np.zeros(nS//ff)
res[state//ff]=(state%ff+1)/ff
return res
#actor feature
def phi(state,action):
res=np.zeros(nA*nS//ff)
res[action*nS//ff:(action+1)*nS//ff]=feat(state)
return res
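# Worked example (added for clarity; uses nS=100, ff=5 as defined above): feat(37) is a 20-dim
# vector that is zero except res[37//5] = res[7] = (37 % 5 + 1) / 5 = 0.6, and phi(s, a) copies
# feat(s) into the a-th 20-dim block of an 80-dim vector.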
#algorithm
def algo2(constrained,seed):
np.random.seed(seed)
#### get number of log files in log directory
logrd="fr"
logcd="fc"
run_num = 0
current_num_files = next(os.walk(logrd))[2]
run_num = len(current_num_files)
log_r = open(logrd+"/PPO_fr_log_"+str(run_num)+".csv","w+")
log_r.write('episode,reward\n')
run_num = 0
current_num_files = next(os.walk(logcd))[2]
run_num = len(current_num_files)
log_c = open(logcd+"/PPO_fc_log_"+str(run_num)+".csv","w+")
log_c.write('episode,reward\n')
K=5
epsilon=0.01
Low=-10000000 #P in projection operator
theta = np.zeros((T,nA*nS//ff))
value = np.zeros((T+1,nS//ff)) #parameter v
Y=0
gamma=0
n=1
J=np.zeros(N)
S=np.zeros(N)
returns=deque(maxlen=100)
violations=deque(maxlen=100)
beta=np.full(nS,1)
beta[0]=(nS-1)*9
beta=beta/np.sum(beta)
while n<=N:
#step-sizes
a=K/(n**0.55)
b=K/(n**0.8)
c=K/(n**1)
rewards = []
actions = []
states = []
constraints = []
actual=[]
state = choice(nS,p=beta) #start state
for i in range(T):
probs=softmax([np.dot(phi(state,k),theta[i]) for k in range(nA)])
action=choice(nA,p=probs/np.sum(probs))
#transition
new_state=choice(nS,p=P[i,state,action])
reward,constraint=R[i,state,action,new_state],G[i,state,action,new_state]
states.append(state)
actions.append(action)
constraints.append(constraint)
rewards.append(reward+gamma*constraint)
actual.append(reward)
state = new_state
#terminal
reward,constraint=RT[state],GT[state]
states.append(state)
constraints.append(constraint)
rewards.append(reward+gamma*(constraint-alpha))
actual.append(reward)
#learning
for i in range(T):
probs=softmax([np.dot(phi(states[i],k),theta[i]) for k in range(nA)])
delta=rewards[i]+np.dot(feat(states[i+1]),value[i+1])-np.dot(feat(states[i]),value[i])
psi=phi(states[i],actions[i])-np.sum([phi(states[i],k)*probs[k] for k in range(nA)])
value[i]+=a*delta*feat(states[i])
theta[i]+=b*((psi*delta)-(epsilon*theta[i]))
#terminal
delta=rewards[T]-np.dot(feat(states[T]),value[T])
value[T]+=a*delta*feat(states[T])
#lagrangian update
if(constrained):
gamma=max(Low,min(0,gamma-c*(Y-alpha)))
#recursion 25
Y=(1-a)*Y + a*np.sum(constraints)
returns.append(np.sum(actual))
violations.append(np.sum(constraints))
J[n-1]=np.mean(returns)
S[n-1]=np.mean(violations)
log_r.write('{},{}\n'.format(n,J[n-1]))
log_r.flush()
log_c.write('{},{}\n'.format(n,S[n-1]))
log_c.flush()
print("algo2:",n,":",J[n-1],S[n-1],gamma,np.sum(actual))
n+=1
log_r.close()
log_c.close()
print("Done")
if __name__ == '__main__':
seed=randint(10000)
algo2(False,seed)
| 4,541 | 23.159574 | 98 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite3/generate.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
T=20
s=10
save1=randint(s,size=(T,s))
save2=randint(s,size=(T,s))
np.savetxt("mdp/S1",save1)
np.savetxt("mdp/S2",save2)
| 297 | 17.625 | 33 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite3/finite.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
import os
from multiprocessing import Process
#generate MDP
T=100
size=10
nS=size*size
nA=4
alpha=20
N=50000 #no of episodes
P=np.zeros((T,nS,nA,nS))
R=np.zeros((T,nS,nA,nS))
G=np.zeros((T,nS,nA,nS))
save1=np.loadtxt("mdp/S1").astype(int)
save2=np.loadtxt("mdp/S2").astype(int)
def get(a,b):
return (a%size)*size+b%size
def put(state):
return (state//size,state%size)
#Transition matrix
for j in range(nS):
a,b=put(j)
indices=[get(a,b+1),get(a,b-1),get(a+1,b),get(a-1,b)]
P[0,j,:,indices]=3
for k in range(nA):
P[0,j,k,indices[k]]=81
P[:,j,k]=P[0,j,k]/np.sum(P[0,j,k])
#Reward matrix
for ti in range(20):
for i in range(size):
b=save1[ti,i]
index=get(i,b)
R[ti*5:(ti+1)*5,:,:,index]=10
#Constraint cost matrix
for ti in range(20):
for i in range(size):
b=save2[ti,i]
index=[get(i,b)]
G[ti*5:(ti+1)*5,:,:,index]=10
RT=np.zeros(nS)
GT=np.zeros(nS)
ff=5
#state feature
def feat(state):
res=np.zeros(nS//ff)
res[state//ff]=(state%ff+1)/ff
return res
#actor feature
def phi(state,action):
res=np.zeros(nA*nS//ff)
res[action*nS//ff:(action+1)*nS//ff]=feat(state)
return res
#algorithm
def algo2(constrained,seed):
np.random.seed(seed)
#### get number of log files in log directory
logrd="fr"
logcd="fc"
run_num = 0
current_num_files = next(os.walk(logrd))[2]
run_num = len(current_num_files)
log_r = open(logrd+"/PPO_fr_log_"+str(run_num)+".csv","w+")
log_r.write('episode,reward\n')
run_num = 0
current_num_files = next(os.walk(logcd))[2]
run_num = len(current_num_files)
log_c = open(logcd+"/PPO_fc_log_"+str(run_num)+".csv","w+")
log_c.write('episode,reward\n')
K=5
epsilon=0.01
Low=-10000000 #P in projection operator
theta = np.zeros((T,nA*nS//ff))
value = np.zeros((T+1,nS//ff)) #parameter v
Y=0
gamma=0
n=1
J=np.zeros(N)
S=np.zeros(N)
returns=deque(maxlen=100)
violations=deque(maxlen=100)
beta=np.full(nS,1)
beta[0]=(nS-1)*9
beta=beta/np.sum(beta)
while n<=N:
#step-sizes
a=K/(n**0.55)
b=K/(n**0.8)
c=K/(n**1)
rewards = []
actions = []
states = []
constraints = []
actual=[]
state = choice(nS,p=beta) #start state
for i in range(T):
probs=softmax([np.dot(phi(state,k),theta[i]) for k in range(nA)])
action=choice(nA,p=probs/np.sum(probs))
#transition
new_state=choice(nS,p=P[i,state,action])
reward,constraint=R[i,state,action,new_state],G[i,state,action,new_state]
states.append(state)
actions.append(action)
constraints.append(constraint)
rewards.append(reward+gamma*constraint)
actual.append(reward)
state = new_state
#terminal
reward,constraint=RT[state],GT[state]
states.append(state)
constraints.append(constraint)
rewards.append(reward+gamma*(constraint-alpha))
actual.append(reward)
#learning
for i in range(T):
probs=softmax([np.dot(phi(states[i],k),theta[i]) for k in range(nA)])
delta=rewards[i]+np.dot(feat(states[i+1]),value[i+1])-np.dot(feat(states[i]),value[i])
psi=phi(states[i],actions[i])-np.sum([phi(states[i],k)*probs[k] for k in range(nA)])
value[i]+=a*delta*feat(states[i])
theta[i]+=b*((psi*delta)-(epsilon*theta[i]))
#terminal
delta=rewards[T]-np.dot(feat(states[T]),value[T])
value[T]+=a*delta*feat(states[T])
#lagrangian update
if(constrained):
gamma=max(Low,min(0,gamma-c*(Y-alpha)))
#recursion 25
Y=(1-a)*Y + a*np.sum(constraints)
returns.append(np.sum(actual))
violations.append(np.sum(constraints))
J[n-1]=np.mean(returns)
S[n-1]=np.mean(violations)
log_r.write('{},{}\n'.format(n,J[n-1]))
log_r.flush()
log_c.write('{},{}\n'.format(n,S[n-1]))
log_c.flush()
print("algo2:",n,":",J[n-1],S[n-1],gamma,np.sum(actual))
n+=1
log_r.close()
log_c.close()
print("Done")
if __name__ == '__main__':
seed=randint(10000)
algo2(False,seed)
| 4,533 | 23.117021 | 98 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite_c3/generate.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
T=20
s=10
save1=randint(s,size=(T,s))
save2=randint(s,size=(T,s))
np.savetxt("mdp/S1",save1)
np.savetxt("mdp/S2",save2)
| 297 | 17.625 | 33 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite_c3/finite.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
import os
from multiprocessing import Process
#generate MDP
T=100
size=10
nS=size*size
nA=4
alpha=20
N=500000 #no of episodes
P=np.zeros((T,nS,nA,nS))
R=np.zeros((T,nS,nA,nS))
G=np.zeros((T,nS,nA,nS))
save1=np.loadtxt("mdp/S1").astype(int)
save2=np.loadtxt("mdp/S2").astype(int)
def get(a,b):
return (a%size)*size+b%size
def put(state):
return (state//size,state%size)
#Transition matrix
for j in range(nS):
a,b=put(j)
indices=[get(a,b+1),get(a,b-1),get(a+1,b),get(a-1,b)]
P[0,j,:,indices]=3
for k in range(nA):
P[0,j,k,indices[k]]=81
P[:,j,k]=P[0,j,k]/np.sum(P[0,j,k])
#Reward matrix
for ti in range(20):
for i in range(size):
b=save1[ti,i]
index=get(i,b)
R[ti*5:(ti+1)*5,:,:,index]=10
#Constraint cost matrix
for ti in range(20):
for i in range(size):
b=save2[ti,i]
index=[get(i,b)]
G[ti*5:(ti+1)*5,:,:,index]=10
RT=np.zeros(nS)
GT=np.zeros(nS)
ff=5
#state feature
def feat(state):
res=np.zeros(nS//ff)
res[state//ff]=(state%ff+1)/ff
return res
#actor feature
def phi(state,action):
res=np.zeros(nA*nS//ff)
res[action*nS//ff:(action+1)*nS//ff]=feat(state)
return res
#algorithm
def algo2(constrained,seed):
np.random.seed(seed)
#### get number of log files in log directory
logrd="fr"
logcd="fc"
run_num = 0
current_num_files = next(os.walk(logrd))[2]
run_num = len(current_num_files)
log_r = open(logrd+"/PPO_fr_log_"+str(run_num)+".csv","w+")
log_r.write('episode,reward\n')
run_num = 0
current_num_files = next(os.walk(logcd))[2]
run_num = len(current_num_files)
log_c = open(logcd+"/PPO_fc_log_"+str(run_num)+".csv","w+")
log_c.write('episode,reward\n')
K=1.5
epsilon=0.01
Low=-10000000 #P in projection operator
theta = np.zeros((T,nA*nS//ff))
value = np.zeros((T+1,nS//ff)) #parameter v
Y=0
gamma=0
n=1
J=np.zeros(N)
S=np.zeros(N)
returns=deque(maxlen=100)
violations=deque(maxlen=100)
beta=np.full(nS,1)
beta[0]=(nS-1)*9
beta=beta/np.sum(beta)
while n<=N:
#step-sizes
a=K/(n**0.55)
b=K/(n**0.8)
c=K/(n**1)
rewards = []
actions = []
states = []
constraints = []
actual=[]
state = choice(nS,p=beta) #start state
for i in range(T):
probs=softmax([np.dot(phi(state,k),theta[i]) for k in range(nA)])
action=choice(nA,p=probs/np.sum(probs))
#transition
new_state=choice(nS,p=P[i,state,action])
reward,constraint=R[i,state,action,new_state],G[i,state,action,new_state]
states.append(state)
actions.append(action)
constraints.append(constraint)
rewards.append(reward+gamma*constraint)
actual.append(reward)
state = new_state
#terminal
reward,constraint=RT[state],GT[state]
states.append(state)
constraints.append(constraint)
rewards.append(reward+gamma*(constraint-alpha))
actual.append(reward)
#learning
for i in range(T):
probs=softmax([np.dot(phi(states[i],k),theta[i]) for k in range(nA)])
delta=rewards[i]+np.dot(feat(states[i+1]),value[i+1])-np.dot(feat(states[i]),value[i])
psi=phi(states[i],actions[i])-np.sum([phi(states[i],k)*probs[k] for k in range(nA)])
value[i]+=a*delta*feat(states[i])
theta[i]+=b*((psi*delta)-(epsilon*theta[i]))
#terminal
delta=rewards[T]-np.dot(feat(states[T]),value[T])
value[T]+=a*delta*feat(states[T])
#lagrangian update
if(constrained):
gamma=max(Low,min(0,gamma-c*(Y-alpha)))
#recursion 25
Y=(1-a)*Y + a*np.sum(constraints)
returns.append(np.sum(actual))
violations.append(np.sum(constraints))
J[n-1]=np.mean(returns)
S[n-1]=np.mean(violations)
log_r.write('{},{}\n'.format(n,J[n-1]))
log_r.flush()
log_c.write('{},{}\n'.format(n,S[n-1]))
log_c.flush()
print("algo2:",n,":",J[n-1],S[n-1],gamma,np.sum(actual))
n+=1
log_r.close()
log_c.close()
print("Done")
if __name__ == '__main__':
seed=randint(10000)
algo2(True,seed)
| 4,535 | 23.12766 | 98 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite_c2/generate.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
T=100
s=10
save1=randint(s,size=(T,s))
save2=randint(s,size=(T,s))
np.savetxt("mdp/S1",save1)
np.savetxt("mdp/S2",save2)
| 298 | 17.6875 | 33 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite_c2/finite.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
import os
from multiprocessing import Process
#generate MDP
T=100
size=10
nS=size*size
nA=4
alpha=20
N=500000 #no of episodes
P=np.zeros((T,nS,nA,nS))
R=np.zeros((T,nS,nA,nS))
G=np.zeros((T,nS,nA,nS))
save1=np.loadtxt("mdp/S1").astype(int)
save2=np.loadtxt("mdp/S2").astype(int)
def get(a,b):
return (a%size)*size+b%size
def put(state):
return (state//size,state%size)
#Transition matrix
for j in range(nS):
a,b=put(j)
indices=[get(a,b+1),get(a,b-1),get(a+1,b),get(a-1,b)]
P[0,j,:,indices]=3
for k in range(nA):
P[0,j,k,indices[k]]=81
P[:,j,k]=P[0,j,k]/np.sum(P[0,j,k])
#Reward matrix
for ti in range(100):
for i in range(size):
b=save1[ti,i]
index=get(i,b)
R[ti*1:(ti+1)*1,:,:,index]=10
#Constraint cost matrix
for ti in range(100):
for i in range(size):
b=save2[ti,i]
index=[get(i,b)]
G[ti*1:(ti+1)*1,:,:,index]=10
RT=np.zeros(nS)
GT=np.zeros(nS)
ff=5
#state feature
def feat(state):
res=np.zeros(nS//ff)
res[state//ff]=(state%ff+1)/ff
return res
#actor feature
def phi(state,action):
res=np.zeros(nA*nS//ff)
res[action*nS//ff:(action+1)*nS//ff]=feat(state)
return res
#algorithm
def algo2(constrained,seed):
np.random.seed(seed)
#### get number of log files in log directory
logrd="fr"
logcd="fc"
run_num = 0
current_num_files = next(os.walk(logrd))[2]
run_num = len(current_num_files)
log_r = open(logrd+"/PPO_fr_log_"+str(run_num)+".csv","w+")
log_r.write('episode,reward\n')
run_num = 0
current_num_files = next(os.walk(logcd))[2]
run_num = len(current_num_files)
log_c = open(logcd+"/PPO_fc_log_"+str(run_num)+".csv","w+")
log_c.write('episode,reward\n')
K=1.5
epsilon=0.01
Low=-10000000 #P in projection operator
theta = np.zeros((T,nA*nS//ff))
value = np.zeros((T+1,nS//ff)) #parameter v
Y=0
gamma=0
n=1
J=np.zeros(N)
S=np.zeros(N)
returns=deque(maxlen=100)
violations=deque(maxlen=100)
beta=np.full(nS,1)
beta[0]=(nS-1)*9
beta=beta/np.sum(beta)
while n<=N:
#step-sizes
a=K/(n**0.55)
b=K/(n**0.8)
c=K/(n**1)
rewards = []
actions = []
states = []
constraints = []
actual=[]
state = choice(nS,p=beta) #start state
for i in range(T):
probs=softmax([np.dot(phi(state,k),theta[i]) for k in range(nA)])
action=choice(nA,p=probs/np.sum(probs))
#transition
new_state=choice(nS,p=P[i,state,action])
reward,constraint=R[i,state,action,new_state],G[i,state,action,new_state]
states.append(state)
actions.append(action)
constraints.append(constraint)
rewards.append(reward+gamma*constraint)
actual.append(reward)
state = new_state
#terminal
reward,constraint=RT[state],GT[state]
states.append(state)
constraints.append(constraint)
rewards.append(reward+gamma*(constraint-alpha))
actual.append(reward)
#learning
for i in range(T):
probs=softmax([np.dot(phi(states[i],k),theta[i]) for k in range(nA)])
delta=rewards[i]+np.dot(feat(states[i+1]),value[i+1])-np.dot(feat(states[i]),value[i])
psi=phi(states[i],actions[i])-np.sum([phi(states[i],k)*probs[k] for k in range(nA)])
value[i]+=a*delta*feat(states[i])
theta[i]+=b*((psi*delta)-(epsilon*theta[i]))
#terminal
delta=rewards[T]-np.dot(feat(states[T]),value[T])
value[T]+=a*delta*feat(states[T])
#lagrangian update
if(constrained):
gamma=max(Low,min(0,gamma-c*(Y-alpha)))
#recursion 25
Y=(1-a)*Y + a*np.sum(constraints)
returns.append(np.sum(actual))
violations.append(np.sum(constraints))
J[n-1]=np.mean(returns)
S[n-1]=np.mean(violations)
log_r.write('{},{}\n'.format(n,J[n-1]))
log_r.flush()
log_c.write('{},{}\n'.format(n,S[n-1]))
log_c.flush()
print("algo2:",n,":",J[n-1],S[n-1],gamma,np.sum(actual))
n+=1
log_r.close()
log_c.close()
print("Done")
if __name__ == '__main__':
seed=randint(10000)
algo2(True,seed)
| 4,537 | 23.138298 | 98 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite_c1/generate.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
T=10
s=10
save1=randint(s,size=(T,s))
save2=randint(s,size=(T,s))
np.savetxt("mdp/S1",save1)
np.savetxt("mdp/S2",save2)
| 297 | 17.625 | 33 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite_c1/finite.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
import os
from multiprocessing import Process
#generate MDP
T=100
size=10
nS=size*size
nA=4
alpha=20
N=500000 #no of episodes
P=np.zeros((T,nS,nA,nS))
R=np.zeros((T,nS,nA,nS))
G=np.zeros((T,nS,nA,nS))
save1=np.loadtxt("mdp/S1").astype(int)
save2=np.loadtxt("mdp/S2").astype(int)
def get(a,b):
return (a%size)*size+b%size
def put(state):
return (state//size,state%size)
#Transition matrix
for j in range(nS):
a,b=put(j)
indices=[get(a,b+1),get(a,b-1),get(a+1,b),get(a-1,b)]
P[0,j,:,indices]=3
for k in range(nA):
P[0,j,k,indices[k]]=81
P[:,j,k]=P[0,j,k]/np.sum(P[0,j,k])
#Reward matrix
for ti in range(10):
for i in range(size):
b=save1[ti,i]
index=get(i,b)
R[ti*10:(ti+1)*10,:,:,index]=10
#Constraint cost matrix
for ti in range(10):
for i in range(size):
b=save2[ti,i]
index=[get(i,b)]
G[ti*10:(ti+1)*10,:,:,index]=10
RT=np.zeros(nS)
GT=np.zeros(nS)
ff=5
#state feature
def feat(state):
res=np.zeros(nS//ff)
res[state//ff]=(state%ff+1)/ff
return res
#actor feature
def phi(state,action):
res=np.zeros(nA*nS//ff)
res[action*nS//ff:(action+1)*nS//ff]=feat(state)
return res
#algorithm
def algo2(constrained,seed):
np.random.seed(seed)
#### get number of log files in log directory
logrd="fr"
logcd="fc"
run_num = 0
current_num_files = next(os.walk(logrd))[2]
run_num = len(current_num_files)
log_r = open(logrd+"/PPO_fr_log_"+str(run_num)+".csv","w+")
log_r.write('episode,reward\n')
run_num = 0
current_num_files = next(os.walk(logcd))[2]
run_num = len(current_num_files)
log_c = open(logcd+"/PPO_fc_log_"+str(run_num)+".csv","w+")
log_c.write('episode,reward\n')
K=1.5
epsilon=0.01
Low=-10000000 #P in projection operator
theta = np.zeros((T,nA*nS//ff))
value = np.zeros((T+1,nS//ff)) #parameter v
Y=0
gamma=0
n=1
J=np.zeros(N)
S=np.zeros(N)
returns=deque(maxlen=100)
violations=deque(maxlen=100)
beta=np.full(nS,1)
beta[0]=(nS-1)*9
beta=beta/np.sum(beta)
while n<=N:
#step-sizes
a=K/(n**0.55)
b=K/(n**0.8)
c=K/(n**1)
rewards = []
actions = []
states = []
constraints = []
actual=[]
state = choice(nS,p=beta) #start state
for i in range(T):
probs=softmax([np.dot(phi(state,k),theta[i]) for k in range(nA)])
action=choice(nA,p=probs/np.sum(probs))
#transition
new_state=choice(nS,p=P[i,state,action])
reward,constraint=R[i,state,action,new_state],G[i,state,action,new_state]
states.append(state)
actions.append(action)
constraints.append(constraint)
rewards.append(reward+gamma*constraint)
actual.append(reward)
state = new_state
#terminal
reward,constraint=RT[state],GT[state]
states.append(state)
constraints.append(constraint)
rewards.append(reward+gamma*(constraint-alpha))
actual.append(reward)
#learning
for i in range(T):
probs=softmax([np.dot(phi(states[i],k),theta[i]) for k in range(nA)])
delta=rewards[i]+np.dot(feat(states[i+1]),value[i+1])-np.dot(feat(states[i]),value[i])
psi=phi(states[i],actions[i])-np.sum([phi(states[i],k)*probs[k] for k in range(nA)])
value[i]+=a*delta*feat(states[i])
theta[i]+=b*((psi*delta)-(epsilon*theta[i]))
#terminal
delta=rewards[T]-np.dot(feat(states[T]),value[T])
value[T]+=a*delta*feat(states[T])
#lagrangian update
if(constrained):
gamma=max(Low,min(0,gamma-c*(Y-alpha)))
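# Note (added for clarity): gamma is the non-positive Lagrange multiplier; this projected step
# keeps it in [Low, 0] and moves it using the running constraint estimate Y against the budget alpha.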
#recursion 25
Y=(1-a)*Y + a*np.sum(constraints)
returns.append(np.sum(actual))
violations.append(np.sum(constraints))
J[n-1]=np.mean(returns)
S[n-1]=np.mean(violations)
log_r.write('{},{}\n'.format(n,J[n-1]))
log_r.flush()
log_c.write('{},{}\n'.format(n,S[n-1]))
log_c.flush()
print("algo2:",n,":",J[n-1],S[n-1],gamma,np.sum(actual))
n+=1
log_r.close()
log_c.close()
print("Done")
if __name__ == '__main__':
seed=randint(10000)
algo2(True,seed)
| 4,543 | 23.170213 | 98 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite2/generate.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
T=100
s=10
save1=randint(s,size=(T,s))
save2=randint(s,size=(T,s))
np.savetxt("mdp/S1",save1)
np.savetxt("mdp/S2",save2)
| 298 | 17.6875 | 33 |
py
|
Finite-Horizon-with-constraints
|
Finite-Horizon-with-constraints-master/finite2/finite.py
|
import numpy as np
from numpy.random import choice
from numpy.random import randint
from scipy.special import softmax
from collections import deque
from random import sample
import os
from multiprocessing import Process
#generate MDP
T=100
size=10
nS=size*size
nA=4
alpha=20
N=50000 #no of episodes
P=np.zeros((T,nS,nA,nS))
R=np.zeros((T,nS,nA,nS))
G=np.zeros((T,nS,nA,nS))
save1=np.loadtxt("mdp/S1").astype(int)
save2=np.loadtxt("mdp/S2").astype(int)
def get(a,b):
return (a%size)*size+b%size
def put(state):
return (state//size,state%size)
#Transition matrix
for j in range(nS):
a,b=put(j)
indices=[get(a,b+1),get(a,b-1),get(a+1,b),get(a-1,b)]
P[0,j,:,indices]=3
for k in range(nA):
P[0,j,k,indices[k]]=81
P[:,j,k]=P[0,j,k]/np.sum(P[0,j,k])
#Reward matrix
for ti in range(100):
for i in range(size):
b=save1[ti,i]
index=get(i,b)
R[ti*1:(ti+1)*1,:,:,index]=10
#Constraint cost matrix
for ti in range(100):
for i in range(size):
b=save2[ti,i]
index=[get(i,b)]
G[ti*1:(ti+1)*1,:,:,index]=10
RT=np.zeros(nS)
GT=np.zeros(nS)
ff=5
#state feature
def feat(state):
res=np.zeros(nS//ff)
res[state//ff]=(state%ff+1)/ff
return res
#actor feature
def phi(state,action):
res=np.zeros(nA*nS//ff)
res[action*nS//ff:(action+1)*nS//ff]=feat(state)
return res
#algorithm
def algo2(constrained,seed):
np.random.seed(seed)
#### get number of log files in log directory
logrd="fr"
logcd="fc"
run_num = 0
current_num_files = next(os.walk(logrd))[2]
run_num = len(current_num_files)
log_r = open(logrd+"/PPO_fr_log_"+str(run_num)+".csv","w+")
log_r.write('episode,reward\n')
run_num = 0
current_num_files = next(os.walk(logcd))[2]
run_num = len(current_num_files)
log_c = open(logcd+"/PPO_fc_log_"+str(run_num)+".csv","w+")
log_c.write('episode,reward\n')
K=5
epsilon=0.01
Low=-10000000 #P in projection operator
theta = np.zeros((T,nA*nS//ff))
value = np.zeros((T+1,nS//ff)) #parameter v
Y=0
gamma=0
n=1
J=np.zeros(N)
S=np.zeros(N)
returns=deque(maxlen=100)
violations=deque(maxlen=100)
beta=np.full(nS,1)
beta[0]=(nS-1)*9
beta=beta/np.sum(beta)
while n<=N:
#step-sizes
a=K/(n**0.55)
b=K/(n**0.8)
c=K/(n**1)
rewards = []
actions = []
states = []
constraints = []
actual=[]
state = choice(nS,p=beta) #start state
for i in range(T):
probs=softmax([np.dot(phi(state,k),theta[i]) for k in range(nA)])
action=choice(nA,p=probs/np.sum(probs))
#transition
new_state=choice(nS,p=P[i,state,action])
reward,constraint=R[i,state,action,new_state],G[i,state,action,new_state]
states.append(state)
actions.append(action)
constraints.append(constraint)
rewards.append(reward+gamma*constraint)
actual.append(reward)
state = new_state
#terminal
reward,constraint=RT[state],GT[state]
states.append(state)
constraints.append(constraint)
rewards.append(reward+gamma*(constraint-alpha))
actual.append(reward)
#learning
for i in range(T):
probs=softmax([np.dot(phi(states[i],k),theta[i]) for k in range(nA)])
delta=rewards[i]+np.dot(feat(states[i+1]),value[i+1])-np.dot(feat(states[i]),value[i])
psi=phi(states[i],actions[i])-np.sum([phi(states[i],k)*probs[k] for k in range(nA)])
value[i]+=a*delta*feat(states[i])
theta[i]+=b*((psi*delta)-(epsilon*theta[i]))
#terminal
delta=rewards[T]-np.dot(feat(states[T]),value[T])
value[T]+=a*delta*feat(states[T])
#lagrangian update
if(constrained):
gamma=max(Low,min(0,gamma-c*(Y-alpha)))
#recursion 25
Y=(1-a)*Y + a*np.sum(constraints)
returns.append(np.sum(actual))
violations.append(np.sum(constraints))
J[n-1]=np.mean(returns)
S[n-1]=np.mean(violations)
log_r.write('{},{}\n'.format(n,J[n-1]))
log_r.flush()
log_c.write('{},{}\n'.format(n,S[n-1]))
log_c.flush()
print("algo2:",n,":",J[n-1],S[n-1],gamma,np.sum(actual))
n+=1
log_r.close()
log_c.close()
print("Done")
if __name__ == '__main__':
seed=randint(10000)
algo2(False,seed)
| 4,535 | 23.12766 | 98 |
py
|
penneysgame
|
penneysgame-master/conway.py
|
#!/usr/bin/env python
'''
conway.py: For solving generalized Penney's game with
generalized Conway formula, including simulations.
For background, see Miller (2019).
'''
import numpy as np
__author__ = "Joshua B. Miller"
__copyright__ = "Creative Commons"
__credits__ = "none"
__license__ = "GPL"
__version__ = "0.0.1"
__maintainer__ = "Joshua B. Miller"
__email__ = "[email protected]"
__status__ = "Prototype"
def payoff_to_B_bets_if_A_occurs_first(A,B,alphabet):
''' (string, string, dictionary)-> (float)
The fair payoff to all B bets if pattern A appears first.
This function calculates the fair payoff to someone who initiates a
fresh sequence of bets each period in which bets anticipate pattern B
note: Assuming the sequence ends at a trial t > len(B), when pattern A occurs
there will be up to len(B) ongoing, and overlapping, B-bet sequences.
For example:
>>>A='THH'
>>>B='HHH'
>>>alphabet={'T':.5, 'H':.5})
>>>AB=payoff_to_B_bets_if_A_occurs_first(A,B,alphabet)
Then in this case AB=4+2=6 as B betters who enter at T-2 lose immediately,
those who enter at T-1 win twice, and those who enter at T win once.
'''
#make sure alphabet is a valid categorical distribution
#(tolerate 1e-10 deviation; not too strict on the sum for precision issues)
if abs(sum(alphabet.values())-1) > 1e-10:
raise Exception("Alphabet is not a valid probability distribution")
#make sure keys are strings
if any( type(el) is not str for el in alphabet.keys() ) :
raise Exception("only strings please")
#make sure strings are of length 1
if any( len(el)>1 for el in alphabet.keys() ) :
raise Exception("Strings must be length 1")
#Make sure all characters in the patterns appear in the Alphabet
if any(char not in alphabet.keys() for char in A+B ):
raise Exception("All chacters must appear in the Alphabet")
#make sure B is not a strict substring of A (or it will appear first for sure)
# and vice-versa
if ( len(B)<len(A) and A.find(B)>-1) or ( len(A)<len(B) and B.find(A)>-1):
raise Exception("one string cannot be a strict substring of another")
# Calculate AB, the total payoffs from each sequence of bets anticipating pattern B
# that are still active when the sequence stops at A
AB = 0
for i in range(len(A)):
A_trailing = A[i:]
B_leading = B[0:len(A_trailing)]
if A_trailing == B_leading:
#The sequence of bets anticipating B that are initiated at i (relatively)
#need to be paid when A occurs if there is perfect overlap of the leading characters of B
# with the trailing characters of A
#Why? The person waiting for B to occur hasn't gone bankrupt yet;
#this person gets paid for betting correctly on every realization in A_trailing.
#On bet i, "wealth" is the amount invested predicting the event A_trailing[i],
#this investment gets a fair gross rate of return
#equal to the inverse of the probability of the event (1/alphabet[A_trailing[i]])
wealth=1
for i in range(len(A_trailing)):
gross_return = 1/alphabet[A_trailing[i]]
wealth = wealth*gross_return
AB = AB + wealth
return AB
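# A quick check of the docstring example above (fair coin assumed): bets entering at T-1 overlap
# 'HH' and pay (1/0.5)**2 = 4, bets entering at T overlap 'H' and pay 2, so
# payoff_to_B_bets_if_A_occurs_first('THH', 'HHH', {'T': .5, 'H': .5}) returns 6.0.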
def oddsAB(A,B,alphabet):
''' (string, string, dictionary)-> [list]
returns odds against pattern A preceding pattern B
odds[0] = "chances against A"
odds[1] = "chances in favor of A"
note: odds= 2* Conway's odds; see Miller (2019) for proof
'''
if A==B:
raise Exception("A==B; patterns cannot precede themselves")
elif ( len(B)<len(A) and A.find(B)>-1): #if B is strict substring of A
odds = [1,0]
elif ( len(A)<len(B) and B.find(A)>-1): #if A is strict substring of B
odds = [0,1]
else:
AA= payoff_to_B_bets_if_A_occurs_first(A,A,alphabet)
AB = payoff_to_B_bets_if_A_occurs_first(A,B,alphabet)
BB = payoff_to_B_bets_if_A_occurs_first(B,B,alphabet)
BA = payoff_to_B_bets_if_A_occurs_first(B,A,alphabet)
odds = [AA-AB , BB-BA]
return odds
def probAB(A,B,alphabet):
''' (string, string, dictionary)-> (float)
probability pattern A precedes pattern B
note: the odds are o[0] chances against for every o[1] chances in favor,
so there are o[0]+o[1] chances in total
'''
o = oddsAB(A,B,alphabet)
return o[1]/(o[0]+o[1])
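# Worked example (added for clarity, fair coin): for A='THH', B='HHH' the payoffs above are
# AA=8, AB=6, BB=14, BA=0, so oddsAB returns [2, 14] and probAB returns 14/16 = 0.875,
# the classic Penney's game result that THH precedes HHH with probability 7/8.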
def expected_waiting_time(A,B,alphabet):
''' (string, string, dictionary)-> (float)
expected waiting time until the first occurrence of A or B
see Miller (2019) for derivation
'''
if A==B:
wait = payoff_to_B_bets_if_A_occurs_first(A,A,alphabet)
elif ( len(B)<len(A) and A.find(B)>-1): #if B is strict substring of A
wait = payoff_to_B_bets_if_A_occurs_first(B,B,alphabet)
elif ( len(A)<len(B) and B.find(A)>-1): #if A is strict substring of B
wait = payoff_to_B_bets_if_A_occurs_first(A,A,alphabet)
else:
AA= payoff_to_B_bets_if_A_occurs_first(A,A,alphabet)
AB = payoff_to_B_bets_if_A_occurs_first(A,B,alphabet)
BB = payoff_to_B_bets_if_A_occurs_first(B,B,alphabet)
BA = payoff_to_B_bets_if_A_occurs_first(B,A,alphabet)
wait = (AA*BB - AB*BA)/(AA + BB - AB - BA)
return wait
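# Continuing the same example (fair coin): expected_waiting_time('THH', 'HHH', {'T': .5, 'H': .5})
# evaluates (8*14 - 6*0) / (8 + 14 - 6 - 0) = 112/16 = 7.0 flips until either pattern first appears.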
def simulate_winrates_penney_game(A,B,alphabet,number_of_sequences):
'''
(string, string, dictionary, integer)-> (list)
Play generalized Penney's game and calculate how often
pattern A precedes pattern B, and vice versa
'''
N = number_of_sequences
#The letters in the dictionary have a categorical distribution
#defined by the key, value pairs
outcomes = list(alphabet.keys())
probabilities = list(alphabet.values())
n_wins = np.array([0, 0])
n_flips = 0
for i in range(N):
max_length=max(len(A),len(B))
window = ['!']* max_length
#on each experiment draw from dictionary until either pattern A,
# or pattern B appears
while True:
window.pop(0)
draw=np.random.choice(outcomes, 1, replace=True, p=probabilities)
n_flips += 1
window.append(draw[0])
ch_window = "".join(map(str,window))
if ch_window[max_length-len(A):] == A:
n_wins[0] += 1
break
elif ch_window[max_length-len(B):] == B:
n_wins[1] += 1
break
winrates = n_wins/N
av_n_flips = n_flips/N
return winrates, av_n_flips
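# A small end-to-end sanity check (hypothetical usage; results vary with the random draws):
#   alphabet = {'T': .5, 'H': .5}
#   winrates, av_flips = simulate_winrates_penney_game('THH', 'HHH', alphabet, 10000)
# winrates[0] should then approach probAB('THH', 'HHH', alphabet) and av_flips should approach
# expected_waiting_time('THH', 'HHH', alphabet).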
def all_patterns(j,alphabet):
'''
recursively builds all patterns of length j from alphabet
note: before calling, the following two lists must be initialized within the module:
>>>k=3
>>>conway.list_pattern=['-']*k
>>>conway.patterns = []
>>>conway.all_patterns(k,alphabet)
>>>patterns = conway.patterns
'''
global list_pattern
global patterns
if j == 1:
for key in alphabet.keys():
list_pattern[-j] = key
string_pattern = ''.join(list_pattern)
patterns.append(string_pattern)
else:
for key in alphabet.keys():
list_pattern[-j] = key
all_patterns(j-1,alphabet)
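# Note (added for clarity): after the recursion finishes, patterns holds all len(alphabet) ** k
# strings of length k (e.g. 8 patterns for a binary alphabet and k=3).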
| 7,370 | 35.490099 | 102 |
py
|
RegularizedBN
|
RegularizedBN-main/inference.py
|
from fairseq.models.roberta import RobertaModel
roberta = RobertaModel.from_pretrained(
'./checkpoints/transformer_roberta_large_rte/',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='RTE-bin'
)
label_fn = lambda label: roberta.task.label_dictionary.string(
[label + roberta.task.label_dictionary.nspecial]
)
ncorrect, nsamples = 0, 0
roberta.cuda()
roberta.eval()
with open('glue_data/RTE/dev.tsv') as fin:
fin.readline()
for index, line in enumerate(fin):
tokens = line.strip().split('\t')
sent1, sent2, target = tokens[1], tokens[2], tokens[3]
tokens = roberta.encode(sent1, sent2)
prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
prediction_label = label_fn(prediction)
ncorrect += int(prediction_label == target)
nsamples += 1
print('| Accuracy: ', float(ncorrect)/float(nsamples))
| 914 | 34.192308 | 92 |
py
|
RegularizedBN
|
RegularizedBN-main/setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import setup, find_packages, Extension
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python >= 3.6 is required for fairseq.')
with open('README.md') as f:
readme = f.read()
if sys.platform == 'darwin':
extra_compile_args = ['-stdlib=libc++', '-O3']
else:
extra_compile_args = ['-std=c++11', '-O3']
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
'fairseq.libbleu',
sources=[
'fairseq/clib/libbleu/libbleu.cpp',
'fairseq/clib/libbleu/module.cpp',
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
'fairseq.data.data_utils_fast',
sources=['fairseq/data/data_utils_fast.pyx'],
language='c++',
extra_compile_args=extra_compile_args,
),
NumpyExtension(
'fairseq.data.token_block_utils_fast',
sources=['fairseq/data/token_block_utils_fast.pyx'],
language='c++',
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend([
cpp_extension.CppExtension(
'fairseq.libnat',
sources=[
'fairseq/clib/libnat/edit_dist.cpp',
],
)
])
if 'CUDA_HOME' in os.environ:
extensions.extend([
cpp_extension.CppExtension(
'fairseq.libnat_cuda',
sources=[
'fairseq/clib/libnat_cuda/edit_dist.cu',
'fairseq/clib/libnat_cuda/binding.cpp'
],
)])
cmdclass['build_ext'] = cpp_extension.BuildExtension
except ImportError:
pass
if 'READTHEDOCS' in os.environ:
# don't build extensions when generating docs
extensions = []
if 'build_ext' in cmdclass:
del cmdclass['build_ext']
# use CPU build of PyTorch
dependency_links = [
'https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl'
]
else:
dependency_links = []
if 'clean' in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(['rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd'], shell=True)
setup(
name='fairseq',
version='0.9.0',
description='Facebook AI Research Sequence-to-Sequence Toolkit',
url='https://github.com/pytorch/fairseq',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
long_description=readme,
long_description_content_type='text/markdown',
setup_requires=[
'cython',
'numpy',
'setuptools>=18.0',
],
install_requires=[
'cffi',
'cython',
'editdistance',
'numpy',
'regex',
'sacrebleu',
'torch',
'tqdm',
],
dependency_links=dependency_links,
packages=find_packages(exclude=['scripts', 'tests']),
ext_modules=extensions,
test_suite='tests',
entry_points={
'console_scripts': [
'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main',
'fairseq-generate = fairseq_cli.generate:cli_main',
'fairseq-interactive = fairseq_cli.interactive:cli_main',
'fairseq-preprocess = fairseq_cli.preprocess:cli_main',
'fairseq-score = fairseq_cli.score:cli_main',
'fairseq-train = fairseq_cli.train:cli_main',
'fairseq-validate = fairseq_cli.validate:cli_main',
],
},
cmdclass=cmdclass,
zip_safe=False,
)
| 4,389 | 25.768293 | 101 |
py
|
RegularizedBN
|
RegularizedBN-main/hubconf.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from fairseq.hub_utils import BPEHubInterface as bpe # noqa
from fairseq.hub_utils import TokenizerHubInterface as tokenizer # noqa
from fairseq.models import MODEL_REGISTRY
dependencies = [
'numpy',
'regex',
'requests',
'torch',
]
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast
except (ImportError, ModuleNotFoundError):
try:
import cython
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), 'setup.py'),
['build_ext', '--inplace'],
)
except (ImportError, ModuleNotFoundError):
print(
'Unable to build Cython components. Please make sure Cython is '
'installed if the torch.hub model you are loading depends on it.'
)
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
# to simplify the interface we only expose named models
# globals()[_model_type] = _cls.from_pretrained
| 1,432 | 28.244898 | 78 |
py
|
RegularizedBN
|
RegularizedBN-main/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""
from fairseq_cli.train import cli_main
if __name__ == '__main__':
cli_main()
| 366 | 23.466667 | 70 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__version__ = '0.9.0'
import examples.noisychannel # noqa
| 238 | 25.555556 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/wav2vec/vq-wav2vec_featurize.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import pprint
import glob, os, argparse
import torch
from torch import nn
try:
import tqdm
except ImportError:
print("Install tqdm to use --log-format=tqdm")
from fairseq.models.wav2vec.wav2vec import Wav2VecModel
import tqdm
import soundfile as sf
from torch.utils.data import DataLoader
import os.path as osp
class FilesDataset:
def __init__(self, files, labels):
self.files = files
if labels and osp.exists(labels):
with open(labels, 'r') as lbl_f:
self.labels = [line.rstrip() for line in lbl_f]
else:
self.labels = labels
def __len__(self):
return len(self.files)
def __getitem__(self, index):
fname = self.files[index]
wav, sr = sf.read(fname)
assert sr == 16000
wav = torch.from_numpy(wav).float()
lbls = None
if self.labels:
if isinstance(self.labels, str):
lbl_file = osp.splitext(fname)[0] + "." + self.labels
with open(lbl_file, 'r') as lblf:
lbls = lblf.readline()
assert lbls is not None
else:
lbls = self.labels[index]
return wav, lbls
def collate(self, batch):
return batch
class ArgTypes:
@staticmethod
def existing_path(arg):
arg = str(arg)
assert osp.exists(arg), f"File {arg} does not exist"
return arg
@staticmethod
def mkdir(arg):
arg = str(arg)
os.makedirs(arg, exist_ok=True)
return arg
class DatasetWriter:
def __init__(self):
self.args = self.load_config()
pprint.pprint(self.args.__dict__)
self.model = self.load_model()
def __getattr__(self, attr):
return getattr(self.args, attr)
def read_manifest(self, fname):
with open(fname, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
fnames = [
osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
]
return fnames
def process_splits(self):
if self.args.shard is not None or self.args.num_shards is not None:
assert self.args.shard is not None and self.args.num_shards is not None
for split in self.splits:
print(split)
if self.extension == "tsv":
datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
print("Reading manifest file: ", datadir)
files = self.read_manifest(datadir)
else:
datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
files = glob.glob(datadir, recursive=True)
assert len(files) > 0
if self.args.shard is not None:
files = files[self.args.shard::self.args.num_shards]
lbls = []
with open(self.data_file(split), 'w') as srcf:
for line, lbl in self.iterate(files):
print(line, file=srcf)
if self.args.labels:
lbls.append(lbl + '\n')
if self.args.labels:
assert all(a is not None for a in lbls)
with open(self.lbl_file(split), 'w') as lblf:
lblf.writelines(lbls)
def iterate(self, files):
data = self.load_data(files)
for samples in tqdm.tqdm(data, total=len(files)//32):
for wav, lbl in samples:
x = wav.unsqueeze(0).float().cuda()
div = 1
while x.size(-1) // div > self.args.max_size:
div += 1
xs = x.chunk(div, dim=-1)
result = []
for x in xs:
torch.cuda.empty_cache()
x = self.model.feature_extractor(x)
if self.quantize_location == "encoder":
with torch.no_grad():
_, idx = self.model.vector_quantizer.forward_idx(x)
idx = idx.squeeze(0).cpu()
else:
with torch.no_grad():
z = self.model.feature_aggregator(x)
_, idx = self.model.vector_quantizer.forward_idx(z)
idx = idx.squeeze(0).cpu()
result.append(idx)
idx = torch.cat(result, dim=0)
yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
def lbl_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.lbl{shard_part}")
def data_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.src{shard_part}")
def var_file(self):
return osp.join(self.output_dir, f"vars.pt")
def load_config(self):
parser = argparse.ArgumentParser("Vector Quantized wav2vec features")
# Model Arguments
parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
parser.add_argument("--data-parallel", action="store_true")
# Output Arguments
parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)
# Data Arguments
parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
parser.add_argument("--splits", type=str, nargs="+", required=True)
parser.add_argument("--extension", type=str, required=True)
parser.add_argument("--labels", type=str, required=False)
parser.add_argument("--shard", type=int, default=None)
parser.add_argument("--num-shards", type=int, default=None)
parser.add_argument("--max-size", type=int, default=1300000)
# Logger Arguments
parser.add_argument(
"--log-format", type=str, choices=["none", "simple", "tqdm"]
)
return parser.parse_args()
def load_data(self, fnames):
dataset = FilesDataset(fnames, self.args.labels)
loader = DataLoader(
dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
)
return loader
def load_model(self):
cp = torch.load(self.checkpoint, map_location=lambda x, _: x)
model = Wav2VecModel.build_model(cp["args"], None)
self.quantize_location = getattr(cp["args"], "vq", "encoder")
model.load_state_dict(cp["model"])
model.eval().float()
model.cuda()
if self.data_parallel:
model = nn.DataParallel(model)
return model
def __call__(self):
self.process_splits()
if hasattr(self.model.feature_extractor, "vars") and (self.args.shard is None or self.args.shard == 0):
vars = (
self.model.feature_extractor.vars.view(
self.model.feature_extractor.banks,
self.model.feature_extractor.num_vars,
-1,
)
.cpu()
.detach()
)
print("writing learned latent variable embeddings: ", vars.shape)
torch.save(vars, self.var_file())
if __name__ == "__main__":
write_data = DatasetWriter()
write_data()
print("Done.")
| 7,714 | 29.737052 | 111 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/wav2vec/wav2vec_manifest.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build train/valid .tsv manifests that index audio files and their frame counts.
"""
import argparse
import glob
import os
import soundfile
import random
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('root', metavar='DIR', help='root directory containing flac files to index')
parser.add_argument('--valid-percent', default=0.01, type=float, metavar='D',
help='percentage of data to use as validation set (between 0 and 1)')
parser.add_argument('--dest', default='.', type=str, metavar='DIR', help='output directory')
parser.add_argument('--ext', default='flac', type=str, metavar='EXT', help='extension to look for')
parser.add_argument('--seed', default=42, type=int, metavar='N', help='random seed')
parser.add_argument('--path-must-contain', default=None, type=str, metavar='FRAG',
help='if set, path must contain this substring for a file to be included in the manifest')
return parser
def main(args):
assert args.valid_percent >= 0 and args.valid_percent <= 1.
dir_path = os.path.realpath(args.root)
search_path = os.path.join(dir_path, '**/*.' + args.ext)
rand = random.Random(args.seed)
with open(os.path.join(args.dest, 'train.tsv'), 'w') as train_f, open(
os.path.join(args.dest, 'valid.tsv'), 'w') as valid_f:
print(dir_path, file=train_f)
print(dir_path, file=valid_f)
for fname in glob.iglob(search_path, recursive=True):
file_path = os.path.realpath(fname)
if args.path_must_contain and args.path_must_contain not in file_path:
continue
frames = soundfile.info(fname).frames
dest = train_f if rand.random() > args.valid_percent else valid_f
print('{}\t{}'.format(os.path.relpath(file_path, dir_path), frames), file=dest)
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(args)
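# Usage sketch (illustrative; paths are hypothetical): index a directory of flac
# files and write train.tsv / valid.tsv manifests whose first line is the resolved
# root directory and whose remaining lines are "<relative path>\t<frame count>".
#
#   python wav2vec_manifest.py /data/LibriSpeech --dest /data/manifests \
#       --ext flac --valid-percent 0.01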
| 2,176 | 37.192982 | 114 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/wav2vec/wav2vec_featurize.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import soundfile as sf
import numpy as np
import torch
from torch import nn
import tqdm
from fairseq.models.wav2vec.wav2vec import Wav2VecModel
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
checkpoint = torch.load(fname)
self.args = checkpoint["args"]
model = Wav2VecModel.build_model(self.args, None)
model.load_state_dict(checkpoint["model"])
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for wav2letter++ datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i",
help="Input Directory", **kwargs)
self.add_argument("--output", "-o",
help="Output Directory", **kwargs)
self.add_argument("--model",
help="Path to model checkpoint", **kwargs)
self.add_argument("--split",
help="Dataset Splits", nargs='+', **kwargs)
self.add_argument("--ext", default="wav", required=False,
help="Audio file extension")
self.add_argument("--no-copy-labels", action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.")
self.add_argument("--use-feat", action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features")
self.add_argument("--gpu",
help="GPU to use", default=0, type=int)
class Prediction():
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer():
""" Write features as hdf5 file in wav2letter++ compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
channel, T = data.shape
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
""" Given a model and a wav2letter++ dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the wav2letter++ dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(self, input_root, output_root, split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), \
"Input path '{}' does not exist".format(self.input_path)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(filter(lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))))
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(lambda x: os.path.join(self.output_path, x.replace("." + self.extension, ".h5context")), \
map(os.path.basename, paths))
for name, target_fname in self._progress(zip(paths, fnames_context), total=len(self)):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__)
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
| 7,110 | 29.004219 | 135 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/wav2vec/libri_labels.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument("tsv")
parser.add_argument("--output-dir", required=True)
parser.add_argument("--output-name", required=True)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
transcriptions = {}
with open(args.tsv, "r") as tsv, open(
os.path.join(args.output_dir, args.output_name + ".ltr"), "w"
) as ltr_out, open(
os.path.join(args.output_dir, args.output_name + ".wrd"), "w"
) as wrd_out:
root = next(tsv).strip()
for line in tsv:
line = line.strip()
dir = os.path.dirname(line)
if dir not in transcriptions:
parts = dir.split("/")
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
path = os.path.join(root, dir, trans_path)
assert os.path.exists(path)
texts = {}
with open(path, "r") as trans_f:
for tline in trans_f:
items = tline.strip().split()
texts[items[0]] = " ".join(items[1:])
transcriptions[dir] = texts
part = os.path.basename(line).split(".")[0]
assert part in transcriptions[dir]
print(transcriptions[dir][part], file=wrd_out)
print(
" ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
file=ltr_out,
)
if __name__ == "__main__":
main()
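# Worked example (illustrative, not from the original file): for an utterance whose
# transcript is "HELLO WORLD", the script writes
#   <output-name>.wrd : HELLO WORLD
#   <output-name>.ltr : H E L L O | W O R L D |
# i.e. the .ltr file spells out letters, with "|" marking word boundaries.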
| 1,836 | 31.22807 | 83 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/backtranslation/extract_bt_data.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fileinput
from tqdm import tqdm
def main():
parser = argparse.ArgumentParser(description=(
'Extract back-translations from the stdout of fairseq-generate. '
        'If there are multiple hypotheses for a source, we only keep the first one. '
))
parser.add_argument('--output', required=True, help='output prefix')
parser.add_argument('--srclang', required=True, help='source language (extracted from H-* lines)')
parser.add_argument('--tgtlang', required=True, help='target language (extracted from S-* lines)')
parser.add_argument('--minlen', type=int, help='min length filter')
parser.add_argument('--maxlen', type=int, help='max length filter')
parser.add_argument('--ratio', type=float, help='ratio filter')
parser.add_argument('files', nargs='*', help='input files')
args = parser.parse_args()
def validate(src, tgt):
srclen = len(src.split(' ')) if src != '' else 0
tgtlen = len(tgt.split(' ')) if tgt != '' else 0
if (
(args.minlen is not None and (srclen < args.minlen or tgtlen < args.minlen))
or (args.maxlen is not None and (srclen > args.maxlen or tgtlen > args.maxlen))
or (args.ratio is not None and (max(srclen, tgtlen) / float(min(srclen, tgtlen)) > args.ratio))
):
return False
return True
def safe_index(toks, index, default):
try:
return toks[index]
except IndexError:
return default
with open(args.output + '.' + args.srclang, 'w') as src_h, \
open(args.output + '.' + args.tgtlang, 'w') as tgt_h:
        tgt = None  # guard in case an H- line appears before any S- line
        for line in tqdm(fileinput.input(args.files)):
if line.startswith('S-'):
tgt = safe_index(line.rstrip().split('\t'), 1, '')
elif line.startswith('H-'):
if tgt is not None:
src = safe_index(line.rstrip().split('\t'), 2, '')
if validate(src, tgt):
print(src, file=src_h)
print(tgt, file=tgt_h)
tgt = None
if __name__ == '__main__':
main()
| 2,363 | 38.4 | 107 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/backtranslation/deduplicate_lines.py
|
#!/usr/bin/python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fileinput
import hashlib
from multiprocessing import Pool
import sys
def get_hashes_and_lines(raw_line):
hash = hashlib.md5(raw_line).hexdigest()
return hash, raw_line
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--workers', type=int, default=10)
parser.add_argument('files', nargs='*', help='input files')
args = parser.parse_args()
seen = set()
with fileinput.input(args.files, mode='rb') as h:
pool = Pool(args.workers)
results = pool.imap_unordered(get_hashes_and_lines, h, 1000)
for i, (hash, raw_line) in enumerate(results):
if hash not in seen:
seen.add(hash)
sys.stdout.buffer.write(raw_line)
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
if __name__ == '__main__':
main()
| 1,221 | 28.095238 | 68 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/translation_moe/score.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Scoring script for computing pairwise BLEU and multi-ref BLEU over a set of
candidate hypotheses.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
"""
import argparse
from itertools import chain
import sys
import random
import numpy as np
from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu
def main():
parser = argparse.ArgumentParser(sys.argv[0])
parser.add_argument('--sys', nargs='*', default='', metavar='FILE',
help='path to system output')
parser.add_argument('--ref', default='', metavar='FILE',
help='path to references')
parser.add_argument('--output', default='', metavar='FILE',
help='print outputs into a pretty format')
args = parser.parse_args()
if args.sys:
src, tgt, hypos, log_probs = load_sys(args.sys)
print('pairwise BLEU: %.2f' % pairwise(hypos))
if args.output:
merge(src, tgt, hypos, log_probs, args.output)
if args.ref:
_, _, refs = load_ref(args.ref)
if args.sys:
multi_ref(refs, hypos)
else:
intra_ref(refs)
def dictolist(d):
a = sorted(d.items(), key=lambda i: i[0])
return [i[1] for i in a]
def load_sys(paths):
src, tgt, hypos, log_probs = {}, {}, {}, {}
for path in paths:
with open(path) as f:
for line in f:
line = line.rstrip()
# S: source
# T: target
# D: detokenized system output
if line.startswith(('S-', 'T-', 'D-')):
i = int(line[line.find('-')+1:line.find('\t')])
if line.startswith('S-'):
src[i] = line.split('\t')[1]
if line.startswith('T-'):
tgt[i] = line.split('\t')[1]
if line.startswith('D-'):
if i not in hypos:
hypos[i] = []
log_probs[i] = []
hypos[i].append(line.split('\t')[2])
log_probs[i].append(float(line.split('\t')[1]))
return dictolist(src), dictolist(tgt), dictolist(hypos), dictolist(log_probs)
def load_ref(path):
with open(path) as f:
lines = f.readlines()
src, tgt, refs = [], [], []
i = 0
while i < len(lines):
if lines[i].startswith('S-'):
src.append(lines[i].split('\t')[1].rstrip())
i += 1
elif lines[i].startswith('T-'):
tgt.append(lines[i].split('\t')[1].rstrip())
i += 1
else:
a = []
while i < len(lines) and lines[i].startswith('R'):
a.append(lines[i].split('\t')[1].rstrip())
i += 1
refs.append(a)
return src, tgt, refs
def merge(src, tgt, hypos, log_probs, path):
with open(path, 'w') as f:
for s, t, hs, lps in zip(src, tgt, hypos, log_probs):
f.write(s + '\n')
f.write(t + '\n')
f.write('\n')
for h, lp in zip(hs, lps):
f.write('\t%f\t%s\n' % (lp, h.strip()))
f.write('------------------------------------------------------\n')
def corpus_bleu(sys_stream, ref_streams):
bleu = _corpus_bleu(sys_stream, ref_streams, tokenize='none')
return bleu.score
def sentence_bleu(hypothesis, reference):
bleu = _corpus_bleu(hypothesis, reference)
for i in range(1, 4):
bleu.counts[i] += 1
bleu.totals[i] += 1
bleu = compute_bleu(
bleu.counts, bleu.totals,
bleu.sys_len, bleu.ref_len,
smooth_method='exp',
)
return bleu.score
def pairwise(sents):
_ref, _hypo = [], []
for s in sents:
for i in range(len(s)):
for j in range(len(s)):
if i != j:
_ref.append(s[i])
_hypo.append(s[j])
return corpus_bleu(_hypo, [_ref])
def multi_ref(refs, hypos):
_ref, _hypo = [], []
ref_cnt = 0
assert len(refs) == len(hypos)
# count number of refs covered
for rs, hs in zip(refs, hypos):
a = set()
for h in hs:
s = [sentence_bleu(h, r) for r in rs]
j = np.argmax(s)
_ref.append(rs[j])
_hypo.append(h)
best = [k for k in range(len(rs)) if s[k] == s[j]]
a.add(random.choice(best))
ref_cnt += len(a)
print('#refs covered: %.2f' % (ref_cnt / len(refs)))
# transpose refs and hypos
refs = list(zip(*refs))
hypos = list(zip(*hypos))
# compute multi-ref corpus BLEU (leave-one-out to be comparable to intra_ref)
k = len(hypos)
m = len(refs)
flat_hypos = [hypos[j][i] for i in range(len(hypos[0])) for j in range(k)]
duplicated_refs = [
[ref for ref in refs_i for _ in range(k)]
for refs_i in refs
]
loo_bleus = []
for held_out_ref in range(m):
remaining_refs = duplicated_refs[:held_out_ref] + duplicated_refs[held_out_ref+1:]
assert len(remaining_refs) == m - 1
loo_bleus.append(corpus_bleu(flat_hypos, remaining_refs))
print('average multi-reference BLEU (leave-one-out): %.2f' % np.mean(loo_bleus))
def intra_ref(refs):
print('ref pairwise BLEU: %.2f' % pairwise(refs))
refs = list(zip(*refs))
m = len(refs)
concat_h = []
concat_rest = [[] for j in range(m - 1)]
for i, h in enumerate(refs):
rest = refs[:i] + refs[i+1:]
concat_h.append(h)
for j in range(m - 1):
concat_rest[j].extend(rest[j])
concat_h = list(chain.from_iterable(concat_h))
bleu = corpus_bleu(concat_h, concat_rest)
print('multi-reference BLEU (leave-one-out): %.2f' % bleu)
if __name__ == '__main__':
main()
| 6,101 | 30.61658 | 90 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/translation_moe/src/mean_pool_gating_network.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
class MeanPoolGatingNetwork(torch.nn.Module):
"""A simple mean-pooling gating network for selecting experts.
This module applies mean pooling over an encoder's output and returns
    responsibilities for each expert. The encoder format is expected to match
:class:`fairseq.models.transformer.TransformerEncoder`.
"""
def __init__(self, embed_dim, num_experts, dropout=None):
super().__init__()
self.embed_dim = embed_dim
self.num_experts = num_experts
self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None
self.fc2 = torch.nn.Linear(embed_dim, num_experts)
def forward(self, encoder_out):
if not (
hasattr(encoder_out, 'encoder_out')
and hasattr(encoder_out, 'encoder_padding_mask')
and encoder_out.encoder_out.size(2) == self.embed_dim
):
raise ValueError('Unexpected format for encoder_out')
# mean pooling over time
encoder_padding_mask = encoder_out.encoder_padding_mask # B x T
encoder_out = encoder_out.encoder_out.transpose(0, 1) # B x T x C
if encoder_padding_mask is not None:
encoder_out = encoder_out.clone() # required because of transpose above
encoder_out[encoder_padding_mask] = 0
ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True)
x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out)
else:
x = torch.mean(encoder_out, dim=1)
x = torch.tanh(self.fc1(x))
if self.dropout is not None:
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x)
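# Shape sketch (illustrative, assuming a TransformerEncoder-style encoder_out):
# encoder_out.encoder_out is T x B x C and encoder_padding_mask is B x T; the
# module mean-pools over T and returns B x num_experts log-responsibilities.
#
#   gate = MeanPoolGatingNetwork(embed_dim=512, num_experts=3, dropout=0.1)
#   log_resp = gate(encoder_out)  # tensor of shape (B, 3)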
| 2,007 | 38.372549 | 84 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/translation_moe/src/logsumexp_moe.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class LogSumExpMoE(torch.autograd.Function):
"""Standard LogSumExp forward pass, but use *posterior* for the backward.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
"""
@staticmethod
def forward(ctx, logp, posterior, dim=-1):
ctx.save_for_backward(posterior)
ctx.dim = dim
return torch.logsumexp(logp, dim=dim)
@staticmethod
def backward(ctx, grad_output):
posterior, = ctx.saved_tensors
grad_logp = grad_output.unsqueeze(ctx.dim) * posterior
return grad_logp, None, None
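# Usage sketch (illustrative; variable names mirror the translation_moe task): the
# forward pass is a plain logsumexp over the expert dimension, while the backward
# pass distributes gradients according to the fixed posterior rather than the
# softmax of the inputs.
#
#   # lprob_yz: B x K per-expert log-likelihoods, prob_z_xy: B x K responsibilities
#   loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)
#   loss.sum().backward()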
| 835 | 29.962963 | 78 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/translation_moe/src/translation_moe.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import metrics, utils
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from .logsumexp_moe import LogSumExpMoE
from .mean_pool_gating_network import MeanPoolGatingNetwork
@register_task('translation_moe')
class TranslationMoETask(TranslationTask):
"""
Translation task for Mixture of Experts (MoE) models.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument('--method', default='hMoEup',
choices=['sMoElp', 'sMoEup', 'hMoElp', 'hMoEup'])
parser.add_argument('--num-experts', default=3, type=int, metavar='N',
help='number of experts')
parser.add_argument('--mean-pool-gating-network', action='store_true',
help='use a simple mean-pooling gating network')
parser.add_argument('--mean-pool-gating-network-dropout', type=float,
help='dropout for mean-pooling gating network')
parser.add_argument('--mean-pool-gating-network-encoder-dim', type=float,
help='encoder output dim for mean-pooling gating network')
parser.add_argument('--gen-expert', type=int, default=0,
help='which expert to use for generation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
if args.method == 'sMoElp':
# soft MoE with learned prior
self.uniform_prior = False
self.hard_selection = False
elif args.method == 'sMoEup':
# soft MoE with uniform prior
self.uniform_prior = True
self.hard_selection = False
elif args.method == 'hMoElp':
# hard MoE with learned prior
self.uniform_prior = False
self.hard_selection = True
elif args.method == 'hMoEup':
# hard MoE with uniform prior
self.uniform_prior = True
self.hard_selection = True
# add indicator tokens for each expert
for i in range(args.num_experts):
# add to both dictionaries in case we're sharing embeddings
src_dict.add_symbol('<expert_{}>'.format(i))
tgt_dict.add_symbol('<expert_{}>'.format(i))
super().__init__(args, src_dict, tgt_dict)
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
if not self.uniform_prior and not hasattr(model, 'gating_network'):
if self.args.mean_pool_gating_network:
if getattr(args, 'mean_pool_gating_network_encoder_dim', None):
encoder_dim = args.mean_pool_gating_network_encoder_dim
elif getattr(args, 'encoder_embed_dim', None):
# assume that encoder_embed_dim is the encoder's output dimension
encoder_dim = args.encoder_embed_dim
else:
raise ValueError('Must specify --mean-pool-gating-network-encoder-dim')
if getattr(args, 'mean_pool_gating_network_dropout', None):
dropout = args.mean_pool_gating_network_dropout
elif getattr(args, 'dropout', None):
dropout = args.dropout
else:
raise ValueError('Must specify --mean-pool-gating-network-dropout')
model.gating_network = MeanPoolGatingNetwork(
encoder_dim, args.num_experts, dropout,
)
else:
raise ValueError(
'translation_moe task with learned prior requires the model to '
'have a gating network; try using --mean-pool-gating-network'
)
return model
def expert_index(self, i):
return i + self.tgt_dict.index('<expert_0>')
def _get_loss(self, sample, model, criterion):
assert hasattr(criterion, 'compute_loss'), \
'translation_moe task requires the criterion to implement the compute_loss() method'
k = self.args.num_experts
bsz = sample['target'].size(0)
def get_lprob_y(encoder_out, prev_output_tokens_k):
net_output = model.decoder(
prev_output_tokens=prev_output_tokens_k,
encoder_out=encoder_out,
)
loss, _ = criterion.compute_loss(model, net_output, sample, reduce=False)
loss = loss.view(bsz, -1)
return -loss.sum(dim=1, keepdim=True) # -> B x 1
def get_lprob_yz(winners=None):
encoder_out = model.encoder(
src_tokens=sample['net_input']['src_tokens'],
src_lengths=sample['net_input']['src_lengths'],
)
if winners is None:
lprob_y = []
for i in range(k):
prev_output_tokens_k = sample['net_input']['prev_output_tokens'].clone()
assert not prev_output_tokens_k.requires_grad
prev_output_tokens_k[:, 0] = self.expert_index(i)
lprob_y.append(get_lprob_y(encoder_out, prev_output_tokens_k))
lprob_y = torch.cat(lprob_y, dim=1) # -> B x K
else:
prev_output_tokens_k = sample['net_input']['prev_output_tokens'].clone()
prev_output_tokens_k[:, 0] = self.expert_index(winners)
lprob_y = get_lprob_y(encoder_out, prev_output_tokens_k) # -> B
if self.uniform_prior:
lprob_yz = lprob_y
else:
lprob_z = model.gating_network(encoder_out) # B x K
if winners is not None:
lprob_z = lprob_z.gather(dim=1, index=winners.unsqueeze(-1))
lprob_yz = lprob_y + lprob_z.type_as(lprob_y) # B x K
return lprob_yz
# compute responsibilities without dropout
with utils.eval(model): # disable dropout
with torch.no_grad(): # disable autograd
lprob_yz = get_lprob_yz() # B x K
prob_z_xy = torch.nn.functional.softmax(lprob_yz, dim=1)
assert not prob_z_xy.requires_grad
# compute loss with dropout
if self.hard_selection:
winners = prob_z_xy.max(dim=1)[1]
loss = -get_lprob_yz(winners)
else:
lprob_yz = get_lprob_yz() # B x K
loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)
loss = loss.sum()
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data),
'ntokens': sample['ntokens'],
'nsentences': bsz,
'sample_size': sample_size,
'posterior': prob_z_xy.float().sum(dim=0).cpu(),
}
return loss, sample_size, logging_output
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
model.train()
loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, expert=None, constraints=None):
expert = expert or self.args.gen_expert
with torch.no_grad():
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=self.expert_index(expert),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
metrics.log_scalar(
'posterior',
sum(log['posterior'] for log in logging_outputs if 'posterior' in log)
)
| 9,137 | 40.348416 | 107 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/translation_moe/src/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import translation_moe # noqa
| 216 | 30 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/unsupervised_quality_estimation/repeat_lines.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
def _normalize_spaces(line):
return ' '.join(line.split())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', required=True, type=str)
parser.add_argument('-n', '--repeat_times', required=True, type=int)
parser.add_argument('-o', '--output_file', required=False, type=str)
args = parser.parse_args()
stream = open(args.output_file, 'w') if args.output_file else sys.stdout
for line in open(args.input_file):
for _ in range(args.repeat_times):
stream.write(_normalize_spaces(line) + '\n')
if __name__ == '__main__':
main()
| 828 | 27.586207 | 76 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/unsupervised_quality_estimation/aggregate_scores.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import sys
aggregate_funcs = {
'std': np.std,
'var': np.var,
'median': np.median,
'mean': np.mean,
'min': np.min,
'max': np.max,
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', required=True, type=str)
parser.add_argument('-n', '--repeat_times', required=True, type=int)
parser.add_argument('-o', '--output_file', required=False)
parser.add_argument('-f', '--func', required=False, default='mean')
args = parser.parse_args()
stream = open(args.output_file, 'w') if args.output_file else sys.stdout
segment_scores = []
for line in open(args.input_file):
segment_scores.append(float(line.strip()))
if len(segment_scores) == args.repeat_times:
stream.write('{}\n'.format(aggregate_funcs[args.func](segment_scores)))
segment_scores = []
if __name__ == '__main__':
main()
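# Pipeline sketch (illustrative; file names are hypothetical): repeat_lines.py
# duplicates every source sentence n times before sampling translations, and this
# script collapses the resulting per-hypothesis scores back to one value per source
# sentence by applying --func (mean by default) over each block of n lines.
#
#   python repeat_lines.py -i src.txt -n 10 -o src.rep10.txt
#   ... generate and score the 10 hypotheses per sentence into scores.txt ...
#   python aggregate_scores.py -i scores.txt -n 10 -f mean -o scores.agg.txt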
| 1,135 | 26.707317 | 83 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/unsupervised_quality_estimation/meteor.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import sys
import subprocess
import tempfile
import math
from itertools import combinations
from collections import defaultdict
def read_translations(path, n_repeats):
segment_counter = 0
segment_translations = []
translations = defaultdict(list)
for line in open(path):
segment_translations.append(' '.join(line.split()))
if len(segment_translations) == n_repeats:
translations[segment_counter] = segment_translations
segment_translations = []
segment_counter += 1
return translations
def generate_input(translations, n_repeats):
_, ref_path = tempfile.mkstemp()
_, mt_path = tempfile.mkstemp()
ref_fh = open(ref_path, 'w')
mt_fh = open(mt_path, 'w')
for segid in sorted(translations.keys()):
assert len(translations[segid]) == n_repeats
indexes = combinations(range(n_repeats), 2)
for idx1, idx2 in indexes:
mt_fh.write(translations[segid][idx1].strip() + '\n')
ref_fh.write(translations[segid][idx2].strip() + '\n')
sys.stderr.write('\nSaved translations to %s and %s' % (ref_path, mt_path))
return ref_path, mt_path
def run_meteor(ref_path, mt_path, metric_path, lang='en'):
_, out_path = tempfile.mkstemp()
subprocess.call([
'java', '-Xmx2G', '-jar', metric_path, mt_path, ref_path,
'-p', '0.5 0.2 0.6 0.75', # default parameters, only changed alpha to give equal weight to P and R
'-norm',
'-l', lang], stdout=open(out_path, 'w'))
os.remove(ref_path)
os.remove(mt_path)
sys.stderr.write('\nSaved Meteor output to %s' % out_path)
return out_path
def read_output(meteor_output_path, n_repeats):
    n_combinations = math.factorial(n_repeats) // (math.factorial(2) * math.factorial(n_repeats - 2))
raw_scores = []
average_scores = []
for line in open(meteor_output_path):
if not line.startswith('Segment '):
continue
score = float(line.strip().split('\t')[1])
raw_scores.append(score)
if len(raw_scores) == n_combinations:
average_scores.append(sum(raw_scores)/n_combinations)
raw_scores = []
os.remove(meteor_output_path)
return average_scores
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-n', '--repeat_times', type=int)
parser.add_argument('-m', '--meteor')
parser.add_argument('-o', '--output')
args = parser.parse_args()
    translations = read_translations(args.input, args.repeat_times)
sys.stderr.write('\nGenerating input for Meteor...')
    ref_path, mt_path = generate_input(translations, args.repeat_times)
sys.stderr.write('\nRunning Meteor...')
out_path = run_meteor(ref_path, mt_path, args.meteor)
sys.stderr.write('\nReading output...')
    scores = read_output(out_path, args.repeat_times)
sys.stderr.write('\nWriting results...')
with open(args.output, 'w') as o:
for scr in scores:
o.write('{}\n'.format(scr))
o.close()
if __name__ == '__main__':
main()
| 3,318 | 32.867347 | 107 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/roberta/preprocess_RACE.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import re
class InputExample:
def __init__(self, paragraph, qa_list, label):
self.paragraph = paragraph
self.qa_list = qa_list
self.label = label
def get_examples(data_dir, set_type):
"""
Extract paragraph and question-answer list from each json file
"""
examples = []
levels = ["middle", "high"]
set_type_c = set_type.split('-')
if len(set_type_c) == 2:
levels = [set_type_c[1]]
set_type = set_type_c[0]
for level in levels:
cur_dir = os.path.join(data_dir, set_type, level)
for filename in os.listdir(cur_dir):
cur_path = os.path.join(cur_dir, filename)
with open(cur_path, 'r') as f:
cur_data = json.load(f)
answers = cur_data["answers"]
options = cur_data["options"]
questions = cur_data["questions"]
context = cur_data["article"].replace("\n", " ")
context = re.sub(r'\s+', ' ', context)
for i in range(len(answers)):
label = ord(answers[i]) - ord("A")
qa_list = []
question = questions[i]
for j in range(4):
option = options[i][j]
if "_" in question:
qa_cat = question.replace("_", option)
else:
qa_cat = " ".join([question, option])
qa_cat = re.sub(r'\s+', ' ', qa_cat)
qa_list.append(qa_cat)
examples.append(InputExample(context, qa_list, label))
return examples
def main():
"""
Helper script to extract paragraphs questions and answers from RACE datasets.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-dir",
help='input directory for downloaded RACE dataset',
)
parser.add_argument(
"--output-dir",
help='output directory for extracted data',
)
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
for set_type in ["train", "dev", "test-middle", "test-high"]:
examples = get_examples(args.input_dir, set_type)
qa_file_paths = [os.path.join(args.output_dir, set_type + ".input" + str(i + 1)) for i in range(4)]
qa_files = [open(qa_file_path, 'w') for qa_file_path in qa_file_paths]
outf_context_path = os.path.join(args.output_dir, set_type + ".input0")
outf_label_path = os.path.join(args.output_dir, set_type + ".label")
outf_context = open(outf_context_path, 'w')
outf_label = open(outf_label_path, 'w')
for example in examples:
outf_context.write(example.paragraph + '\n')
for i in range(4):
qa_files[i].write(example.qa_list[i] + '\n')
outf_label.write(str(example.label) + '\n')
for f in qa_files:
f.close()
outf_label.close()
outf_context.close()
if __name__ == '__main__':
main()
| 3,395 | 32.96 | 107 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/roberta/multiprocessing_bpe_encoder.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import sys
from collections import Counter
from multiprocessing import Pool
from fairseq.data.encoders.gpt2_bpe import get_encoder
def main():
"""
Helper script to encode raw text with the GPT-2 BPE using multiple processes.
The encoder.json and vocab.bpe files can be obtained here:
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--encoder-json",
help='path to encoder.json',
)
parser.add_argument(
"--vocab-bpe",
type=str,
help='path to vocab.bpe',
)
parser.add_argument(
"--inputs",
nargs="+",
default=['-'],
help="input files to filter/encode",
)
parser.add_argument(
"--outputs",
nargs="+",
default=['-'],
help="path to save encoded outputs",
)
parser.add_argument(
"--keep-empty",
action="store_true",
help="keep empty lines",
)
parser.add_argument("--workers", type=int, default=20)
args = parser.parse_args()
assert len(args.inputs) == len(args.outputs), \
"number of input and output paths should match"
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-" else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-" else sys.stdout
for output in args.outputs
]
encoder = MultiprocessingEncoder(args)
pool = Pool(args.workers, initializer=encoder.initializer)
encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100)
stats = Counter()
for i, (filt, enc_lines) in enumerate(encoded_lines, start=1):
if filt == "PASS":
for enc_line, output_h in zip(enc_lines, outputs):
print(enc_line, file=output_h)
else:
stats["num_filtered_" + filt] += 1
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
for k, v in stats.most_common():
print("[{}] filtered {} lines".format(k, v), file=sys.stderr)
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global bpe
bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe)
def encode(self, line):
global bpe
ids = bpe.encode(line)
return list(map(str, ids))
def decode(self, tokens):
global bpe
return bpe.decode(tokens)
def encode_lines(self, lines):
"""
Encode a set of lines. All lines will be encoded together.
"""
enc_lines = []
for line in lines:
line = line.strip()
if len(line) == 0 and not self.args.keep_empty:
return ["EMPTY", None]
tokens = self.encode(line)
enc_lines.append(" ".join(tokens))
return ["PASS", enc_lines]
def decode_lines(self, lines):
dec_lines = []
for line in lines:
tokens = map(int, line.strip().split())
dec_lines.append(self.decode(tokens))
return ["PASS", dec_lines]
if __name__ == "__main__":
main()
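# Usage sketch (illustrative; paths are hypothetical): encode raw text into GPT-2
# BPE ids with 20 worker processes, reading train.raw and writing train.bpe.
#
#   python multiprocessing_bpe_encoder.py \
#       --encoder-json encoder.json --vocab-bpe vocab.bpe \
#       --inputs train.raw --outputs train.bpe --workers 20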
| 3,756 | 27.9 | 81 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/roberta/commonsense_qa/commonsense_qa_task.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
from fairseq.data import (
data_utils,
Dictionary,
encoders,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
)
from fairseq.tasks import FairseqTask, register_task
@register_task('commonsense_qa')
class CommonsenseQATask(FairseqTask):
"""Task to finetune RoBERTa for Commonsense QA."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='DIR',
help='path to data directory; we load <split>.jsonl')
parser.add_argument('--init-token', type=int, default=None,
help='add token at the beginning of each batch item')
parser.add_argument('--num-classes', type=int, default=5)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol('<mask>')
self.bpe = encoders.build_bpe(args)
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'sentence_ranking', 'Must set --criterion=sentence_ranking'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def binarize(s, append_bos=False):
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s, append_eos=True, add_if_not_exist=False,
).long()
if append_bos and self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
src_tokens = [[] for i in range(self.args.num_classes)]
src_lengths = [[] for i in range(self.args.num_classes)]
labels = []
with open(data_path) as h:
for line in h:
example = json.loads(line.strip())
if 'answerKey' in example:
label = ord(example['answerKey']) - ord('A')
labels.append(label)
question = example['question']['stem']
assert len(example['question']['choices']) == self.args.num_classes
# format: `<s> Q: Where would I not want a fox? </s> A: hen house </s>`
question = 'Q: ' + question
question_toks = binarize(question, append_bos=True)
for i, choice in enumerate(example['question']['choices']):
src = 'A: ' + choice['text']
src_bin = torch.cat([question_toks, binarize(src)])
src_tokens[i].append(src_bin)
src_lengths[i].append(len(src_bin))
assert all(len(src_tokens[0]) == len(src_tokens[i]) for i in range(self.args.num_classes))
assert len(src_tokens[0]) == len(src_lengths[0])
assert len(labels) == 0 or len(labels) == len(src_tokens[0])
for i in range(self.args.num_classes):
src_lengths[i] = np.array(src_lengths[i])
src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
src_lengths[i] = ListDataset(src_lengths[i])
dataset = {
'id': IdDataset(),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_tokens[0], reduce=True),
}
for i in range(self.args.num_classes):
dataset.update({
'net_input{}'.format(i + 1): {
'src_tokens': RightPadDataset(
src_tokens[i],
pad_idx=self.source_dictionary.pad(),
),
'src_lengths': src_lengths[i],
}
})
if len(labels) > 0:
dataset.update({'target': RawLabelDataset(labels)})
dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
with data_utils.numpy_seed(self.args.seed):
dataset = SortDataset(
dataset,
# shuffle
sort_order=[np.random.permutation(len(dataset))],
)
print('| Loaded {} with {} samples'.format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
'sentence_classification_head',
num_classes=1,
)
return model
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
| 5,921 | 32.84 | 103 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/roberta/commonsense_qa/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import commonsense_qa_task # noqa
| 220 | 30.571429 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/roberta/wsc/wsc_task.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import tempfile
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import (
data_utils,
Dictionary,
encoders,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PadDataset,
SortDataset,
)
from fairseq.tasks import FairseqTask, register_task
from . import wsc_utils
@register_task('wsc')
class WSCTask(FairseqTask):
"""Task to finetune RoBERTa for Winograd Schemas."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='DIR',
help='path to data directory; we load <split>.jsonl')
parser.add_argument('--init-token', type=int, default=None,
help='add token at the beginning of each batch item')
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol('<mask>')
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
# hack to handle GPT-2 BPE, which includes leading spaces
if args.bpe == 'gpt2':
self.leading_space = True
self.trailing_space = False
else:
self.leading_space = False
self.trailing_space = True
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'wsc', 'Must set --criterion=wsc'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def binarize(self, s: str, append_eos: bool = False):
if self.tokenizer is not None:
s = self.tokenizer.encode(s)
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s, append_eos=append_eos, add_if_not_exist=False,
).long()
if self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.bool)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start:mask_start + mask_size] = 1
return toks, mask
def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
labels = []
for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
prefix = sentence[:pronoun_span.start].text
suffix = sentence[pronoun_span.end:].text_with_ws
# spaCy spans include trailing spaces, but we need to know about
# leading spaces for the GPT-2 BPE
leading_space = ' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else ''
trailing_space = ' ' if pronoun_span.text_with_ws.endswith(' ') else ''
# get noun phrases, excluding pronouns and anything overlapping with the query
cand_spans = wsc_utils.filter_noun_chunks(
wsc_utils.extended_noun_chunks(sentence),
exclude_pronouns=True,
exclude_query=query,
exact_match=False,
)
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_masks = [], []
for cand_span in cand_spans:
toks, mask = self.binarize_with_mask(
cand_span.text, prefix, suffix, leading_space, trailing_space,
)
cand_toks.append(toks)
cand_masks.append(mask)
# collate candidates
cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
assert cand_toks.size() == cand_masks.size()
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_masks)
candidate_lengths.append(cand_toks.size(1))
labels.append(label)
query_lengths = np.array(query_lengths)
query_tokens = ListDataset(query_tokens, query_lengths)
query_masks = ListDataset(query_masks, query_lengths)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
candidate_masks = ListDataset(candidate_masks, candidate_lengths)
labels = ListDataset(labels, [1]*len(labels))
dataset = {
'id': IdDataset(),
'query_tokens': query_tokens,
'query_masks': query_masks,
'candidate_tokens': candidate_tokens,
'candidate_masks': candidate_masks,
'labels': labels,
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, sample_json):
with tempfile.NamedTemporaryFile(buffering=0) as h:
h.write((json.dumps(sample_json) + '\n').encode('utf-8'))
dataset = self.load_dataset(
'disambiguate_pronoun',
data_path=h.name,
return_only=True,
)
return dataset
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
sample_json = wsc_utils.convert_sentence_to_json(sentence)
dataset = self.build_dataset_for_inference(sample_json)
sample = dataset.collater([dataset[0]])
if use_cuda:
sample = utils.move_to_cuda(sample)
def get_masked_input(tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask.bool()] = self.mask
return masked_tokens
def get_lprobs(tokens, mask):
logits, _ = model(src_tokens=get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
cand_lprobs = get_lprobs(
sample['candidate_tokens'][0],
sample['candidate_masks'][0],
)
if sample['query_tokens'][0] is not None:
query_lprobs = get_lprobs(
sample['query_tokens'][0].unsqueeze(0),
sample['query_masks'][0].unsqueeze(0),
)
return (query_lprobs >= cand_lprobs).all().item() == 1
else:
best_idx = cand_lprobs.argmax().item()
full_cand = sample['candidate_tokens'][0][best_idx]
mask = sample['candidate_masks'][0][best_idx]
toks = full_cand[mask.bool()]
return self.bpe.decode(self.source_dictionary.string(toks)).strip()
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
@register_task('winogrande')
class WinograndeTask(WSCTask):
"""
Task for WinoGrande dataset. Efficient implementation for Winograd schema
tasks with exactly two candidates, one of which is correct.
"""
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'winogrande', 'Must set --criterion=winogrande'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == 'test'))
for sample in itr:
sentence, pronoun_span, query, cand_text = sample
prefix = sentence[:pronoun_span[0]].rstrip()
suffix = sentence[pronoun_span[1]:]
leading_space = ' ' if sentence[:pronoun_span[0]].endswith(' ') else ''
trailing_space = ''
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space,
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_mask = self.binarize_with_mask(
cand_text, prefix, suffix, leading_space, trailing_space,
)
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_mask)
candidate_lengths.append(cand_toks.size(0))
query_lengths = np.array(query_lengths)
def get_pad_dataset_fn(tokens, length, pad_idx):
return PadDataset(
ListDataset(tokens, length),
pad_idx=pad_idx,
left_pad=False,
)
query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = get_pad_dataset_fn(candidate_tokens, candidate_lengths, self.vocab.pad())
candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
dataset = {
'id': IdDataset(),
'query_tokens': query_tokens,
'query_masks': query_masks,
'candidate_tokens': candidate_tokens,
'candidate_masks': candidate_masks,
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
| 13,148 | 33.970745 | 103 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/roberta/wsc/wsc_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import json
def convert_sentence_to_json(sentence):
if '_' in sentence:
prefix, rest = sentence.split('_', 1)
query, rest = rest.split('_', 1)
query_index = len(prefix.rstrip().split(' '))
else:
query, query_index = None, None
prefix, rest = sentence.split('[', 1)
pronoun, rest = rest.split(']', 1)
pronoun_index = len(prefix.rstrip().split(' '))
sentence = sentence.replace('_', '').replace('[', '').replace(']', '')
return {
'idx': 0,
'text': sentence,
'target': {
'span1_index': query_index,
'span1_text': query,
'span2_index': pronoun_index,
'span2_text': pronoun,
},
}
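# Worked example (illustrative, not part of the original file): the marked-up WSC
# format wraps the query span in underscores and the pronoun in square brackets.
#   convert_sentence_to_json(
#       "The city councilmen refused _the demonstrators_ a permit because [they] feared violence."
#   )
# returns
#   {'idx': 0,
#    'text': 'The city councilmen refused the demonstrators a permit because they feared violence.',
#    'target': {'span1_index': 4, 'span1_text': 'the demonstrators',
#               'span2_index': 9, 'span2_text': 'they'}}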
def extended_noun_chunks(sentence):
noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks}
np_start, cur_np = 0, 'NONE'
for i, token in enumerate(sentence):
np_type = token.pos_ if token.pos_ in {'NOUN', 'PROPN'} else 'NONE'
if np_type != cur_np:
if cur_np != 'NONE':
noun_chunks.add((np_start, i))
if np_type != 'NONE':
np_start = i
cur_np = np_type
if cur_np != 'NONE':
noun_chunks.add((np_start, len(sentence)))
return [sentence[s:e] for (s, e) in sorted(noun_chunks)]
def find_token(sentence, start_pos):
found_tok = None
for tok in sentence:
if tok.idx == start_pos:
found_tok = tok
break
return found_tok
def find_span(sentence, search_text, start=0):
search_text = search_text.lower()
for tok in sentence[start:]:
remainder = sentence[tok.i:].text.lower()
if remainder.startswith(search_text):
len_to_consume = len(search_text)
start_idx = tok.idx
for next_tok in sentence[tok.i:]:
end_idx = next_tok.idx + len(next_tok.text)
if end_idx - start_idx == len_to_consume:
span = sentence[tok.i:next_tok.i + 1]
return span
return None
@lru_cache(maxsize=1)
def get_detokenizer():
from sacremoses import MosesDetokenizer
detok = MosesDetokenizer(lang='en')
return detok
@lru_cache(maxsize=1)
def get_spacy_nlp():
import en_core_web_lg
nlp = en_core_web_lg.load()
return nlp
def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False):
detok = get_detokenizer()
nlp = get_spacy_nlp()
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
if positive_only and 'label' in sample and not sample['label']:
# only consider examples where the query is correct
continue
target = sample['target']
# clean up the query
query = target['span1_text']
if query is not None:
if '\n' in query:
continue
if query.endswith('.') or query.endswith(','):
query = query[:-1]
# split tokens
tokens = sample['text'].split(' ')
def strip_pronoun(x):
return x.rstrip('.,"')
# find the pronoun
pronoun_idx = target['span2_index']
pronoun = strip_pronoun(target['span2_text'])
if strip_pronoun(tokens[pronoun_idx]) != pronoun:
# hack: sometimes the index is misaligned
if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun:
pronoun_idx += 1
else:
raise Exception('Misaligned pronoun!')
assert strip_pronoun(tokens[pronoun_idx]) == pronoun
# split tokens before and after the pronoun
before = tokens[:pronoun_idx]
after = tokens[pronoun_idx + 1:]
# the GPT BPE attaches leading spaces to tokens, so we keep track
# of whether we need spaces before or after the pronoun
leading_space = ' ' if pronoun_idx > 0 else ''
trailing_space = ' ' if len(after) > 0 else ''
# detokenize
before = detok.detokenize(before, return_str=True)
pronoun = detok.detokenize([pronoun], return_str=True)
after = detok.detokenize(after, return_str=True)
# hack: when the pronoun ends in a period (or comma), move the
# punctuation to the "after" part
if pronoun.endswith('.') or pronoun.endswith(','):
after = pronoun[-1] + trailing_space + after
pronoun = pronoun[:-1]
# hack: when the "after" part begins with a comma or period, remove
# the trailing space
if after.startswith('.') or after.startswith(','):
trailing_space = ''
# parse sentence with spacy
sentence = nlp(before + leading_space + pronoun + trailing_space + after)
# find pronoun span
start = len(before + leading_space)
first_pronoun_tok = find_token(sentence, start_pos=start)
pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i)
assert pronoun_span.text == pronoun
if eval:
# convert to format where pronoun is surrounded by "[]" and
# query is surrounded by "_"
query_span = find_span(sentence, query)
query_with_ws = '_{}_{}'.format(
query_span.text,
(' ' if query_span.text_with_ws.endswith(' ') else '')
)
pronoun_with_ws = '[{}]{}'.format(
pronoun_span.text,
(' ' if pronoun_span.text_with_ws.endswith(' ') else '')
)
if query_span.start < pronoun_span.start:
first = (query_span, query_with_ws)
second = (pronoun_span, pronoun_with_ws)
else:
first = (pronoun_span, pronoun_with_ws)
second = (query_span, query_with_ws)
sentence = (
sentence[:first[0].start].text_with_ws
+ first[1]
+ sentence[first[0].end:second[0].start].text_with_ws
+ second[1]
+ sentence[second[0].end:].text
)
yield sentence, sample.get('label', None)
else:
yield sentence, pronoun_span, query, sample.get('label', None)
def winogrande_jsonl_iterator(input_fname, eval=False):
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
sentence, option1, option2 = sample['sentence'], sample['option1'],\
sample['option2']
pronoun_span = (sentence.index('_'), sentence.index('_') + 1)
if eval:
query, cand = option1, option2
else:
query = option1 if sample['answer'] == '1' else option2
cand = option2 if sample['answer'] == '1' else option1
yield sentence, pronoun_span, query, cand
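# Each Winogrande line is expected to look like
#   {"sentence": "The trophy doesn't fit in the suitcase because _ is too big.",
#    "option1": "trophy", "option2": "suitcase", "answer": "1"}
# pronoun_span holds the character offsets of the "_" placeholder. With eval=False
# the correct option (per "answer") is yielded as the query and the other as the
# candidate; with eval=True the options are yielded in their original order.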
def filter_noun_chunks(chunks, exclude_pronouns=False, exclude_query=None, exact_match=False):
if exclude_pronouns:
chunks = [
np for np in chunks if (
np.lemma_ != '-PRON-'
and not all(tok.pos_ == 'PRON' for tok in np)
)
]
if exclude_query is not None:
excl_txt = [exclude_query.lower()]
filtered_chunks = []
for chunk in chunks:
lower_chunk = chunk.text.lower()
found = False
for excl in excl_txt:
if (
(not exact_match and (lower_chunk in excl or excl in lower_chunk))
or lower_chunk == excl
):
found = True
break
if not found:
filtered_chunks.append(chunk)
chunks = filtered_chunks
return chunks
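# e.g. filter_noun_chunks(chunks, exclude_pronouns=True, exclude_query="the trophy")
# drops chunks consisting only of pronouns and any chunk overlapping "the trophy"
# as a substring; with exact_match=True only an exact, case-insensitive match is
# removed.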
| 8,329 | 34.147679 | 94 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/roberta/wsc/wsc_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
@register_criterion('wsc')
class WSCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, 'w')
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument('--wsc-margin-alpha', type=float, metavar='A', default=1.0)
parser.add_argument('--wsc-margin-beta', type=float, metavar='B', default=0.0)
parser.add_argument('--wsc-cross-entropy', action='store_true',
help='use cross entropy formulation instead of margin loss')
parser.add_argument('--save-predictions', metavar='FILE',
help='file to save predictions to')
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def get_lprobs(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
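# i.e. the span score is the masked-LM log-probability of the original tokens at
# the masked positions, averaged over the number of masked tokens (one scalar per
# sequence in the batch).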
def get_loss(self, query_lprobs, cand_lprobs):
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
- query_lprobs
+ self.args.wsc_margin_alpha * (
cand_lprobs - query_lprobs + self.args.wsc_margin_beta
).clamp(min=0)
).sum()
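# With the default margin formulation this evaluates, for each candidate span c,
#   -lp(query) + wsc_margin_alpha * max(0, lp(c) - lp(query) + wsc_margin_beta)
# and sums over candidates, where lp(.) is the averaged masked-LM log-probability
# computed by get_lprobs above.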
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0., 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample['labels']):
query_lprobs = self.get_lprobs(
model,
sample['query_tokens'][i].unsqueeze(0),
sample['query_masks'][i].unsqueeze(0),
)
cand_lprobs = self.get_lprobs(
model,
sample['candidate_tokens'][i],
sample['candidate_masks'][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample['id'][i].item()
if self.prediction_h is not None:
print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'ncorrect': ncorrect,
'nqueries': nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
agg_output = {
'loss': loss_sum / sample_size / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs)
nqueries = sum(log.get('nqueries', 0) for log in logging_outputs)
if nqueries > 0:
agg_output['accuracy'] = ncorrect / float(nqueries)
return agg_output
@register_criterion('winogrande')
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.get_lprobs(
model,
sample['query_tokens'],
sample['query_masks'],
)
cand_lprobs = self.get_lprobs(
model,
sample['candidate_tokens'],
sample['candidate_masks'],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample['query_tokens'].size(0)
ncorrect = pred.sum().item()
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'ncorrect': ncorrect,
'nqueries': sample_size,
}
return loss, sample_size, logging_output
| 6,034 | 35.137725 | 88 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/roberta/wsc/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import wsc_criterion # noqa
from . import wsc_task # noqa
| 245 | 29.75 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/__init__.py
|
from . import tasks, criterions, models # noqa
| 48 | 23.5 | 47 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/infer.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import editdistance
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.data.data_utils import post_process
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def add_asr_eval_argument(parser):
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
parser.add_argument(
"--wfstlm", default=None, help="wfstlm on dictonary output units"
)
parser.add_argument(
"--rnnt_decoding_type",
default="greedy",
help="wfstlm on dictonary\
output units",
)
parser.add_argument(
"--lm-weight",
"--lm_weight",
type=float,
default=0.2,
help="weight for lm while interpolating with neural score",
)
parser.add_argument(
"--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
)
parser.add_argument(
"--w2l-decoder", choices=["viterbi", "kenlm", "fairseqlm"], help="use a w2l decoder"
)
parser.add_argument("--lexicon", help="lexicon for w2l decoder")
parser.add_argument("--unit-lm", action='store_true', help="if using a unit lm")
parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
parser.add_argument("--beam-threshold", type=float, default=25.0)
parser.add_argument("--beam-size-token", type=float, default=100)
parser.add_argument("--word-score", type=float, default=1.0)
parser.add_argument("--unk-weight", type=float, default=-math.inf)
parser.add_argument("--sil-weight", type=float, default=0.0)
parser.add_argument(
"--dump-emissions",
type=str,
default=None,
help="if present, dumps emissions into this file and exits",
)
parser.add_argument(
"--dump-features",
type=str,
default=None,
help="if present, dumps features into this file and exits",
)
parser.add_argument(
"--load-emissions",
type=str,
default=None,
help="if present, loads emissions from this file",
)
return parser
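# An illustrative invocation (paths are placeholders; the remaining flags come
# from fairseq's generation parser and the arguments added above, and the
# appropriate --task for the ASR dataset must also be supplied):
#   python examples/speech_recognition/infer.py $DATA_DIR \
#       --path $MODEL.pt --gen-subset test --w2l-decoder kenlm \
#       --lexicon lexicon.txt --kenlm-model lm.bin --lm-weight 2.0 --word-score 1.0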
def check_args(args):
# assert args.path is not None, "--path required for generation!"
# assert args.results_path is not None, "--results_path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def get_dataset_itr(args, task, models):
return task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
def process_predictions(
args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, args.remove_bpe)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id), file=res_files["hypo.units"]
)
print("{} ({}-{})".format(hyp_words, speaker, id), file=res_files["hypo.words"])
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, args.remove_bpe)
if res_files is not None:
print("{} ({}-{})".format(tgt_pieces, speaker, id), file=res_files["ref.units"])
print("{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"])
# only score top hypothesis
if not args.quiet:
logger.debug("HYPO:" + hyp_words)
logger.debug("TARGET:" + tgt_words)
logger.debug("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
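# The returned pair is (word-level edit distance, number of reference words);
# the caller accumulates both over the dataset and reports
# WER = 100.0 * total_errors / total_reference_words.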
def prepare_result_files(args):
def get_res_file(file_prefix):
if args.num_shards > 1:
file_prefix = f'{args.shard_id}_{file_prefix}'
path = os.path.join(
args.results_path,
"{}-{}-{}.txt".format(
file_prefix, os.path.basename(args.path), args.gen_subset
),
)
return open(path, "w", buffering=1)
if not args.results_path:
return None
return {
"hypo.words": get_res_file("hypo.word"),
"hypo.units": get_res_file("hypo.units"),
"ref.words": get_res_file("ref.word"),
"ref.units": get_res_file("ref.units"),
}
def load_models_and_criterions(filenames, data_path, arg_overrides=None, task=None, model_state=None):
models = []
criterions = []
if arg_overrides is None:
arg_overrides = {}
arg_overrides['wer_args'] = None
arg_overrides['data'] = data_path
if filenames is None:
assert model_state is not None
filenames = [0]
else:
filenames = filenames.split(":")
for filename in filenames:
if model_state is None:
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides)
else:
state = model_state
args = state["args"]
if task is None:
task = tasks.setup_task(args)
model = task.build_model(args)
model.load_state_dict(state["model"], strict=True)
models.append(model)
criterion = task.build_criterion(args)
if "criterion" in state:
criterion.load_state_dict(state["criterion"], strict=True)
criterions.append(criterion)
return models, criterions, args
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation
"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
class ExistingEmissionsDecoder(object):
def __init__(self, decoder, emissions):
self.decoder = decoder
self.emissions = emissions
def generate(self, models, sample, **unused):
ids = sample["id"].cpu().numpy()
try:
emissions = np.stack(self.emissions[ids])
except:
print([x.shape for x in self.emissions[ids]])
raise Exception('invalid sizes')
emissions = torch.from_numpy(emissions)
return self.decoder.decode(emissions)
def main(args, task=None, model_state=None):
check_args(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 4000000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
if task is None:
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
logger.info(
"| {} {} {} examples".format(
args.data, args.gen_subset, len(task.dataset(args.gen_subset))
)
)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info("| decoding with criterion {}".format(args.criterion))
# Load ensemble
if args.load_emissions:
models, criterions = [], []
else:
logger.info("| loading model(s) from {}".format(args.path))
models, criterions, _ = load_models_and_criterions(
args.path,
data_path=args.data,
arg_overrides=eval(args.model_overrides), # noqa
task=task,
model_state=model_state,
)
optimize_models(args, use_cuda, models)
# hack to pass transitions to W2lDecoder
if args.criterion == "asg_loss":
trans = criterions[0].asg.trans.data
args.asg_transitions = torch.flatten(trans).tolist()
# Load dataset (possibly sharded)
itr = get_dataset_itr(args, task, models)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(args):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, task.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, task.target_dictionary)
elif w2l_decoder == "fairseqlm":
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(args, task.target_dictionary)
else:
return super().build_generator(args)
generator = build_generator(args)
if args.load_emissions:
generator = ExistingEmissionsDecoder(
generator, np.load(args.load_emissions, allow_pickle=True)
)
logger.info("loaded emissions from " + args.load_emissions)
num_sentences = 0
if args.results_path is not None and not os.path.exists(args.results_path):
os.makedirs(args.results_path)
max_source_pos = (
utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
),
)
if max_source_pos is not None:
max_source_pos = max_source_pos[0]
if max_source_pos is not None:
max_source_pos = max_source_pos[0] - 1
if args.dump_emissions:
emissions = {}
if args.dump_features:
features = {}
models[0].bert.proj = None
else:
res_files = prepare_result_files(args)
errs_t = 0
lengths_t = 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
gen_timer.start()
if args.dump_emissions:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
emm = emm.transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
emissions[id.item()] = emm[i]
continue
elif args.dump_features:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
padding = encoder_out["encoder_padding_mask"][i].cpu().numpy() if encoder_out["encoder_padding_mask"] is not None else None
features[id.item()] = (feat[i], padding)
continue
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
speaker = None
# id = task.dataset(args.gen_subset).ids[int(sample_id)]
id = sample_id
toks = sample["target"][i, :] if 'target_label' not in sample else sample["target_label"][i, :]
target_tokens = (
utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
)
# Process top predictions
errs, length = process_predictions(
args, hypos[i], None, tgt_dict, target_tokens, res_files, speaker, id
)
errs_t += errs
lengths_t += length
wps_meter.update(num_generated_tokens)
t.log({"wps": round(wps_meter.avg)})
num_sentences += sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
wer = None
if args.dump_emissions:
emm_arr = []
for i in range(len(emissions)):
emm_arr.append(emissions[i])
np.save(args.dump_emissions, emm_arr)
logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
elif args.dump_features:
feat_arr = []
for i in range(len(features)):
feat_arr.append(features[i])
np.save(args.dump_features, feat_arr)
logger.info(f"saved {len(features)} emissions to {args.dump_features}")
else:
if lengths_t > 0:
wer = errs_t * 100.0 / lengths_t
logger.info(f"WER: {wer}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
"sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
return task, wer
def make_parser():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
return parser
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| 14,668 | 33.193473 | 147 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/w2l_decoder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wav2letter decoders.
"""
from collections import namedtuple, deque
import gc
import itertools as it
import numpy as np
import torch
import os.path as osp
import warnings
from fairseq import tasks
from fairseq.utils import apply_to_sample
from examples.speech_recognition.data.replabels import unpack_replabels
try:
from wav2letter.common import create_word_dict, load_words
from wav2letter.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from wav2letter.decoder import (
CriterionType,
DecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
LexiconFreeDecoder,
)
except:
warnings.warn(
"wav2letter python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/wav2letter/wiki/Python-bindings"
)
LM = object
LMState = object
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
# criterion-specific init
if args.criterion == "ctc":
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.asg_transitions = None
elif args.criterion == "asg_loss":
self.criterion_type = CriterionType.ASG
self.blank = -1
self.asg_transitions = args.asg_transitions
self.max_replabel = args.max_replabel
assert len(self.asg_transitions) == self.vocab_size ** 2
else:
raise RuntimeError(f"unknown criterion: {args.criterion}")
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
# encoder_out = models[0].encoder(**encoder_input)
encoder_out = models[0](**encoder_input)
if self.criterion_type == CriterionType.CTC:
emissions = models[0].get_normalized_probs(encoder_out, log_probs=True)
elif self.criterion_type == CriterionType.ASG:
emissions = encoder_out["encoder_out"]
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
if self.criterion_type == CriterionType.CTC:
idxs = filter(lambda x: x != self.blank, idxs)
elif self.criterion_type == CriterionType.ASG:
idxs = filter(lambda x: x >= 0, idxs)
idxs = unpack_replabels(list(idxs), self.tgt_dict, self.max_replabel)
return torch.LongTensor(list(idxs))
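# e.g. for CTC with blank index 0, idxs [5, 5, 0, 5, 7, 7] collapses to [5, 5, 7]:
# consecutive repeats are merged first (groupby), then blanks are dropped, so the
# two 5's separated by a blank survive as distinct tokens.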
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.silence = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.lexicon = load_words(args.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = DecoderOptions(
args.beam,
int(getattr(args, "beam_size_token", len(tgt_dict))),
args.beam_threshold,
args.lm_weight,
args.word_score,
args.unk_weight,
args.sil_weight,
0,
False,
self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
False,
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
def __init__(self, dictionary, model):
LM.__init__(self)
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing):
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(self, state: LMState, token_index: int, no_cache: bool = False):
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
(can be lexicon index then you should store inside LM the
mapping between indices of lexicon and lm, or lm index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size):
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
def finish(self, state: LMState):
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self):
self.states = {}
self.stateq = deque()
gc.collect()
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.silence = tgt_dict.bos()
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
lm_args = checkpoint["args"]
lm_args.data = osp.dirname(args.kenlm_model)
print(lm_args)
task = tasks.setup_task(lm_args)
model = task.build_model(lm_args)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
self.decoder_opts = DecoderOptions(
args.beam,
int(getattr(args, "beam_size_token", len(tgt_dict))),
args.beam_threshold,
args.lm_weight,
args.word_score,
args.unk_weight,
args.sil_weight,
0,
False,
self.criterion_type,
)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
def idx_to_word(idx):
if self.unit_lm:
return self.idx_to_wrd[idx]
else:
return self.word_dict[idx]
def make_hypo(result):
hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
if self.lexicon:
hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
| 14,872 | 33.269585 | 164 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/criterions/cross_entropy_acc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
logprobs (Torch.tensor) of shape N, T, D i.e.
batchsize, timesteps, dimensions
targets (Torch.tensor) of shape N, T i.e batchsize, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this Criterion will only work with LSTMEncoderModels or
FairseqModels which have decoder, or Models which return TorchTensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of valid (non-padding) target tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
# loss: per-sentence loss (sample_size == nsentences here)
# nll_loss: per output token loss
return agg_output
| 5,372 | 40.015267 | 85 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/criterions/ASG_loss.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from examples.speech_recognition.data.replabels import pack_replabels
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(
self,
task,
silence_token,
asg_transitions_init,
max_replabel,
linseg_updates,
hide_linseg_messages,
):
from wav2letter.criterion import ASGLoss, CriterionScaleMode
super().__init__(task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(silence_token)
if silence_token in self.tgt_dict
else None
)
self.max_replabel = max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = linseg_updates
self.linseg_message_state = "none" if hide_linseg_messages else "start"
@classmethod
def build_criterion(cls, args, task):
return cls(
task,
args.silence_token,
args.asg_transitions_init,
args.max_replabel,
args.linseg_updates,
args.hide_linseg_messages,
)
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
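# e.g. with eos=2 and silence=4: [7, 8, 2] -> [7, 8, 4], [7, 4, 2] -> [7, 4],
# and a target that does not end in eos is returned unchanged.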
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
| 5,857 | 33.25731 | 85 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/criterions/__init__.py
|
import importlib
import os
# ASG loss requires wav2letter
files_to_skip = set()
try:
import wav2letter
except ImportError:
files_to_skip.add("ASG_loss.py")
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_recognition.criterions." + criterion_name
)
| 470 | 25.166667 | 87 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/models/vggtransformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import LinearizedConvolution
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
"""
Transformers with convolutional context for ASR
https://arxiv.org/abs/1904.11660
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock:
[(out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
use_layer_norm), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help=""""
a tuple containing the configuration of the encoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]')
""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension, can be None. If specified, projecting the
transformer output to the specified dimension""",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--tgt-embed-dim",
type=int,
metavar="N",
help="embedding dimension of the decoder target tokens",
)
parser.add_argument(
"--transformer-dec-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the decoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--conv-dec-config",
type=str,
metavar="EXPR",
help="""
an array of tuples for the decoder 1-D convolution config
[(out_channels, conv_kernel_size, use_layer_norm), ...]""",
)
@classmethod
def build_encoder(cls, args, task):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, task):
return TransformerDecoder(
dictionary=task.target_dictionary,
embed_dim=args.tgt_embed_dim,
transformer_config=eval(args.transformer_dec_config),
conv_config=eval(args.conv_dec_config),
encoder_output_dim=args.enc_output_dim,
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
base_architecture(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN
# True: apply layerNorm before (dropout + residual) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLu
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
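# These defaults can be overridden on the command line with Python literals
# (the strings are eval()'d in build_encoder/build_decoder), e.g.
#   --vggblock-enc-config "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
#   --transformer-enc-config "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"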
# TODO: replace transformer encoder config from one liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args
class VGGTransformerEncoder(FairseqEncoder):
"""VGG + Transformer encoder"""
def __init__(
self,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
"""constructor for VGGTransformerEncoder
Args:
- input_feat_per_channel: feature dim (not including stacked,
just base feature)
- in_channels: # of input channels (e.g., if 8 feature vectors are stacked
together, this is 8)
- vggblock_config: configuration of vggblock, see comments on
DEFAULT_ENC_VGGBLOCK_CONFIG
- transformer_config: configuration of transformer layer, see comments
on DEFAULT_ENC_TRANSFORMER_CONFIG
- encoder_output_dim: final transformer output embedding dimension
- transformer_context: (left, right) if set, self-attention will be focused
on (t-left, t+right)
- transformer_sampling: an iterable of int, must match with
len(transformer_config), transformer_sampling[i] indicates sampling
factor for i-th transformer layer, after multihead att and feedforward
part
"""
super().__init__(None)
self.num_vggblocks = 0
if vggblock_config is not None:
if not isinstance(vggblock_config, Iterable):
raise ValueError("vggblock_config is not iterable")
self.num_vggblocks = len(vggblock_config)
self.conv_layers = nn.ModuleList()
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
if vggblock_config is not None:
for _, config in enumerate(vggblock_config):
(
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
layer_norm,
) = config
self.conv_layers.append(
VGGBlock(
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim=input_feat_per_channel,
layer_norm=layer_norm,
)
)
in_channels = out_channels
input_feat_per_channel = self.conv_layers[-1].output_dim
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim
)
# transformer_input_dim is the output dimension of VGG part
self.validate_transformer_config(transformer_config)
self.transformer_context = self.parse_transformer_context(transformer_context)
self.transformer_sampling = self.parse_transformer_sampling(
transformer_sampling, len(transformer_config)
)
self.transformer_layers = nn.ModuleList()
if transformer_input_dim != transformer_config[0][0]:
self.transformer_layers.append(
Linear(transformer_input_dim, transformer_config[0][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.transformer_layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[i])
)
)
self.encoder_output_dim = encoder_output_dim
self.transformer_layers.extend(
[
Linear(transformer_config[-1][0], encoder_output_dim),
LayerNorm(encoder_output_dim),
]
)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
x = x.transpose(1, 2).contiguous()
# (B, C, T, feat)
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1)
x = x.contiguous().view(output_seq_len, bsz, -1)
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
# TODO: shouldn't subsampling_factor be determined in advance?
input_lengths = (src_lengths.float() / subsampling_factor).ceil().long()
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)
transformer_layer_idx = 0
for layer_idx in range(len(self.transformer_layers)):
if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
x = self.transformer_layers[layer_idx](
x, encoder_padding_mask, attn_mask
)
if self.transformer_sampling[transformer_layer_idx] != 1:
sampling_factor = self.transformer_sampling[transformer_layer_idx]
x, encoder_padding_mask, attn_mask = self.slice(
x, encoder_padding_mask, attn_mask, sampling_factor
)
transformer_layer_idx += 1
else:
x = self.transformer_layers[layer_idx](x)
# encoder_padding_mask is a (T x B) tensor, its [t, b] elements indicate
# whether encoder_output[t, b] is valid or not (valid=0, invalid=1)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask.t()
if encoder_padding_mask is not None
else None,
# (B, T) --> (T, B)
}
def infer_conv_output_dim(self, in_channels, input_dim):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def validate_transformer_config(self, transformer_config):
for config in transformer_config:
input_dim, num_heads = config[:2]
if input_dim % num_heads != 0:
msg = (
"ERROR in transformer config {}:".format(config)
+ "input dimension {} ".format(input_dim)
+ "not dividable by number of heads".format(num_heads)
)
raise ValueError(msg)
def parse_transformer_context(self, transformer_context):
"""
transformer_context can be the following:
- None; indicates no context is used, i.e.,
transformer can access full context
- a tuple/list of two int; indicates left and right context,
any number <0 indicates infinite context
* e.g., (5, 6) indicates that for query at x_t, transformer can
access [t-5, t+6] (inclusive)
* e.g., (-1, 6) indicates that for query at x_t, transformer can
access [0, t+6] (inclusive)
"""
if transformer_context is None:
return None
if not isinstance(transformer_context, Iterable):
raise ValueError("transformer context must be Iterable if it is not None")
if len(transformer_context) != 2:
raise ValueError("transformer context must have length 2")
left_context = transformer_context[0]
if left_context < 0:
left_context = None
right_context = transformer_context[1]
if right_context < 0:
right_context = None
if left_context is None and right_context is None:
return None
return (left_context, right_context)
def parse_transformer_sampling(self, transformer_sampling, num_layers):
"""
parsing transformer sampling configuration
Args:
- transformer_sampling, accepted input:
* None, indicating no sampling
* an Iterable with int (>0) as element
- num_layers, expected number of transformer layers, must match with
the length of transformer_sampling if it is not None
Returns:
- A tuple with length num_layers
"""
if transformer_sampling is None:
return (1,) * num_layers
if not isinstance(transformer_sampling, Iterable):
raise ValueError(
"transformer_sampling must be an iterable if it is not None"
)
if len(transformer_sampling) != num_layers:
raise ValueError(
"transformer_sampling {} does not match with the number "
+ "of layers {}".format(transformer_sampling, num_layers)
)
for layer, value in enumerate(transformer_sampling):
if not isinstance(value, int):
raise ValueError("Invalid value in transformer_sampling: ")
if value < 1:
raise ValueError(
"{} layer's subsampling is {}.".format(layer, value)
+ " This is not allowed! "
)
return transformer_sampling
def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
"""
embedding is a (T, B, D) tensor
padding_mask is a (B, T) tensor or None
attn_mask is a (T, T) tensor or None
"""
embedding = embedding[::sampling_factor, :, :]
if padding_mask is not None:
padding_mask = padding_mask[:, ::sampling_factor]
if attn_mask is not None:
attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
return embedding, padding_mask, attn_mask
def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
"""
create attention mask according to sequence lengths and transformer
context
Args:
- input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
the length of b-th sequence
- subsampling_factor: int
* Note that the left_context and right_context is specified in
the input frame-level while input to transformer may already
go through subsampling (e.g., the use of striding in vggblock)
we use subsampling_factor to scale the left/right context
Return:
- a (T, T) binary tensor or None, where T is max(input_lengths)
* if self.transformer_context is None, None
* if left_context is None,
* attn_mask[t, t + right_context + 1:] = 1
* others = 0
* if right_context is None,
* attn_mask[t, 0:t - left_context] = 1
* others = 0
* elsif
* attn_mask[t, t - left_context: t + right_context + 1] = 0
* others = 1
"""
if self.transformer_context is None:
return None
maxT = torch.max(input_lengths).item()
attn_mask = torch.zeros(maxT, maxT)
left_context = self.transformer_context[0]
right_context = self.transformer_context[1]
if left_context is not None:
left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
if right_context is not None:
right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
for t in range(maxT):
if left_context is not None:
st = 0
en = max(st, t - left_context)
attn_mask[t, st:en] = 1
if right_context is not None:
st = t + right_context + 1
st = min(st, maxT - 1)
attn_mask[t, st:] = 1
return attn_mask.to(input_lengths.device)
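    # Illustrative sketch (added comment, not part of the original fairseq
    # code): with transformer_context = (1, 1), subsampling_factor = 1 and
    # input_lengths = [3], the loop above yields
    #     [[0, 0, 1],
    #      [0, 0, 0],
    #      [1, 0, 0]]
    # i.e. positions marked 1 fall outside the allowed one-frame context.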
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs.
Default: ``False``
left_pad (bool, optional): whether the input is left-padded. Default:
``False``
"""
def __init__(
self,
dictionary,
embed_dim=512,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
conv_config=DEFAULT_DEC_CONV_CONFIG,
encoder_output_dim=512,
):
super().__init__(dictionary)
vocab_size = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)
self.conv_layers = nn.ModuleList()
for i in range(len(conv_config)):
out_channels, kernel_size, layer_norm = conv_config[i]
if i == 0:
conv_layer = LinearizedConv1d(
embed_dim, out_channels, kernel_size, padding=kernel_size - 1
)
else:
conv_layer = LinearizedConv1d(
conv_config[i - 1][0],
out_channels,
kernel_size,
padding=kernel_size - 1,
)
self.conv_layers.append(conv_layer)
if layer_norm:
self.conv_layers.append(nn.LayerNorm(out_channels))
self.conv_layers.append(nn.ReLU())
self.layers = nn.ModuleList()
if conv_config[-1][0] != transformer_config[0][0]:
self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
self.layers.append(TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[0])
))
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.layers.append(TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[i])
))
self.fc_out = Linear(transformer_config[-1][0], vocab_size)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
target_padding_mask = (
(prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
if incremental_state is None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens
x = self.embed_tokens(prev_output_tokens)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
for layer in self.conv_layers:
if isinstance(layer, LinearizedConvolution):
x = layer(x, incremental_state)
else:
x = layer(x)
# B x T x C -> T x B x C
x = self._transpose_if_inference(x, incremental_state)
# decoder layers
for layer in self.layers:
if isinstance(layer, TransformerDecoderLayer):
x, *_ = layer(
x,
(encoder_out["encoder_out"] if encoder_out is not None else None),
                    (
                        encoder_out["encoder_padding_mask"].t()
                        if encoder_out is not None
                        and encoder_out["encoder_padding_mask"] is not None
                        else None
                    ),
incremental_state,
self_attn_mask=(
self.buffered_future_mask(x)
if incremental_state is None
else None
),
self_attn_padding_mask=(
target_padding_mask if incremental_state is None else None
),
)
else:
x = layer(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.fc_out(x)
return x, None
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
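    # Illustrative note (added comment, not part of the original fairseq code):
    # for a 3-step target the returned mask is
    #     [[0., -inf, -inf],
    #      [0.,   0., -inf],
    #      [0.,   0.,   0.]]
    # so that position t cannot attend to future positions during training.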
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def _transpose_if_inference(self, x, incremental_state):
if incremental_state:
x = x.transpose(0, 1)
return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock
[(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the Transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ]""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="encoder output dimension, projecting the LSTM output",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--transformer-context",
type=str,
metavar="EXPR",
help="""
either None or a tuple of two ints, indicating left/right context a
transformer can have access to""",
)
parser.add_argument(
"--transformer-sampling",
type=str,
metavar="EXPR",
help="""
either None or a tuple of ints, indicating sampling factor in each layer""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
base_architecture_enconly(args)
encoder = VGGTransformerEncoderOnly(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
transformer_context=eval(args.transformer_context),
transformer_sampling=eval(args.transformer_sampling),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (T, B, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (T, B, D) tensor
        # we need to transpose to get (B, T, D) tensor
lprobs = lprobs.transpose(0, 1).contiguous()
lprobs.batch_first = True
return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
def __init__(
self,
vocab_size,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
super().__init__(
input_feat_per_channel=input_feat_per_channel,
vggblock_config=vggblock_config,
transformer_config=transformer_config,
encoder_output_dim=encoder_output_dim,
in_channels=in_channels,
transformer_context=transformer_context,
transformer_sampling=transformer_sampling,
)
self.fc_out = Linear(self.encoder_output_dim, vocab_size)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
enc_out = super().forward(src_tokens, src_lengths)
x = self.fc_out(enc_out["encoder_out"])
        # x = F.log_softmax(x, dim=-1)
        # Note: this line is not needed, because model.get_normalized_probs
        # will call log_softmax
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B)
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
        return (1e6, 1e6)  # an arbitrarily large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
# nn.init.uniform_(m.weight, -0.1, 0.1)
# nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
# m.weight.data.uniform_(-0.1, 0.1)
# if bias:
# m.bias.data.uniform_(-0.1, 0.1)
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
# seq2seq models
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.transformer_dec_config = getattr(
args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
args.transformer_context = getattr(args, "transformer_context", "None")
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
)
# Size estimations:
# Encoder:
# - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3 = 258K
# Transformer:
# - input dimension adapter: 2560 x 512 -> 1.31M
# - transformer_layers (x12) --> 37.74M
# * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
# * FFN weight: 512*2048*2 = 2.097M
# - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
# - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
# - transformer_layer: (x6) --> 25.16M
# * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
# * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
# * FFN: 512*2048*2 = 2.097M
# Final FC:
# - FC: 512*5000 = 256K (assuming vocab size 5K)
# In total:
# ~65 M
# CTC models
def base_architecture_enconly(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.transformer_context = getattr(args, "transformer_context", "None")
args.transformer_sampling = getattr(args, "transformer_sampling", "None")
@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
    # vggtransformer_enc_1 is the same as vggtransformer_enc_big, except that
    # the number of layers is increased to 16
    # keep it here for backward compatibility purposes
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
| 37,043 | 35.786495 | 88 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/models/w2l_conv_glu_enc.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--conv-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one conv layer
[(out_channels, kernel_size, padding, dropout), ...]
""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
encoder = W2lConvGluEncoder(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
in_channels=args.in_channels,
conv_enc_config=eval(conv_enc_config),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = False
return lprobs
class W2lConvGluEncoder(FairseqEncoder):
def __init__(
self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
):
super().__init__(None)
self.input_dim = input_feat_per_channel
if in_channels != 1:
raise ValueError("only 1 input channel is currently supported")
self.conv_layers = nn.ModuleList()
self.linear_layers = nn.ModuleList()
self.dropouts = []
cur_channels = input_feat_per_channel
for out_channels, kernel_size, padding, dropout in conv_enc_config:
layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
self.conv_layers.append(nn.utils.weight_norm(layer))
self.dropouts.append(
FairseqDropout(dropout, module_name=self.__class__.__name__)
)
if out_channels % 2 != 0:
raise ValueError("odd # of out_channels is incompatible with GLU")
cur_channels = out_channels // 2 # halved by GLU
for out_channels in [2 * cur_channels, vocab_size]:
layer = nn.Linear(cur_channels, out_channels)
layer.weight.data.mul_(math.sqrt(3))
self.linear_layers.append(nn.utils.weight_norm(layer))
cur_channels = out_channels // 2
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
B, T, _ = src_tokens.size()
x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
x = F.glu(x, dim=1)
x = self.dropouts[layer_idx](x)
x = x.transpose(1, 2).contiguous() # (B, T, 908)
x = self.linear_layers[0](x)
x = F.glu(x, dim=2)
x = self.dropouts[-1](x)
x = self.linear_layers[1](x)
assert x.size(0) == B
assert x.size(1) == T
encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
# need to debug this -- find a simpler/elegant way in pytorch APIs
encoder_padding_mask = (
torch.arange(T).view(1, T).expand(B, -1).to(x.device)
>= src_lengths.view(B, 1).expand(-1, T)
).t() # (B x T) -> (T x B)
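        # Added note (not part of the original code): the helper
        # data_utils.lengths_to_encoder_padding_mask in
        # examples/speech_recognition/data computes an equivalent (T, B) mask
        # and could likely replace the arange/expand construction above.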
return {
"encoder_out": encoder_out, # (T, B, vocab_size)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
        return (1e6, 1e6)  # an arbitrarily large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
| 6,079 | 32.96648 | 87 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/models/__init__.py
|
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
model_name = file[:file.find('.py')]
importlib.import_module('examples.speech_recognition.models.' + model_name)
| 266 | 32.375 | 83 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/datasets/asr_prep_json.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import concurrent.futures
from itertools import chain
import argparse
import os
import json
import sentencepiece as spm
import multiprocessing
from fairseq.data import Dictionary
MILLISECONDS_TO_SECONDS = 0.001
def process_sample(aud_path, label, utt_id, sp, tgt_dict):
import torchaudio
input = {}
output = {}
si, ei = torchaudio.info(aud_path)
input["length_ms"] = int(si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS)
input["path"] = aud_path
token = " ".join(sp.EncodeAsPieces(lable))
ids = tgt_dict.encode_line(token, append_eos=False)
output["text"] = lable
output["token"] = token
output["tokenid"] = ', '.join(map(str, [t.tolist() for t in ids]))
return {utt_id: {"input": input, "output": output}}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-dirs", nargs="+", default=['-'], required=True,
help="input directories with audio files")
parser.add_argument("--labels", required=True,
help="aggregated input labels with format <ID LABEL> per line",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--spm-model", required=True,
help="sentencepiece model to use for encoding",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--dictionary", required=True,
help="file to load fairseq dictionary from",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
parser.add_argument("--output", required=True, type=argparse.FileType('w'),
help="path to save json output")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.spm_model.name)
tgt_dict = Dictionary.load(args.dictionary)
labels = {}
for line in args.labels:
(utt_id, label) = line.split(" ", 1)
labels[utt_id] = label
if len(labels) == 0:
        raise Exception('No labels found in ', args.labels.name)
Sample = namedtuple('Sample', 'aud_path utt_id')
samples = []
for path, _, files in chain.from_iterable(os.walk(path) for path in args.audio_dirs):
for f in files:
if f.endswith(args.audio_format):
if len(os.path.splitext(f)) != 2:
raise Exception('Expect <utt_id.extension> file name. Got: ', f)
utt_id = os.path.splitext(f)[0]
if utt_id not in labels:
continue
samples.append(Sample(os.path.join(path, f), utt_id))
utts = {}
num_cpu = multiprocessing.cpu_count()
with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
future_to_sample = {executor.submit(process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict): s for s in samples}
for future in concurrent.futures.as_completed(future_to_sample):
try:
data = future.result()
except Exception as exc:
print('generated an exception: ', exc)
else:
utts.update(data)
json.dump({"utts": utts}, args.output, indent=4)
if __name__ == "__main__":
main()
| 3,670 | 36.845361 | 134 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/utils/wer_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from collections import deque
from enum import Enum
import numpy as np
"""
Utility modules for computation of Word Error Rate,
Alignments, as well as more granular metrics like
deletion, insertion and substitution.
"""
class Code(Enum):
match = 1
substitution = 2
insertion = 3
deletion = 4
class Token(object):
def __init__(self, lbl="", st=np.nan, en=np.nan):
if np.isnan(st):
self.label, self.start, self.end = "", 0.0, 0.0
else:
self.label, self.start, self.end = lbl, st, en
class AlignmentResult(object):
def __init__(self, refs, hyps, codes, score):
self.refs = refs # std::deque<int>
self.hyps = hyps # std::deque<int>
self.codes = codes # std::deque<Code>
self.score = score # float
def coordinate_to_offset(row, col, ncols):
return int(row * ncols + col)
def offset_to_row(offset, ncols):
return int(offset / ncols)
def offset_to_col(offset, ncols):
return int(offset % ncols)
def trimWhitespace(str):
return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str)))
def str2toks(str):
pieces = trimWhitespace(str).split(" ")
toks = []
for p in pieces:
toks.append(Token(p, 0.0, 0.0))
return toks
class EditDistance(object):
def __init__(self, time_mediated):
self.time_mediated_ = time_mediated
self.scores_ = np.nan # Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic>
self.backtraces_ = (
np.nan
) # Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> backtraces_;
self.confusion_pairs_ = {}
def cost(self, ref, hyp, code):
if self.time_mediated_:
if code == Code.match:
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end)
elif code == Code.insertion:
return hyp.end - hyp.start
elif code == Code.deletion:
return ref.end - ref.start
else: # substitution
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1
else:
if code == Code.match:
return 0
elif code == Code.insertion or code == Code.deletion:
return 3
else: # substitution
return 4
def get_result(self, refs, hyps):
res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan)
num_rows, num_cols = self.scores_.shape
res.score = self.scores_[num_rows - 1, num_cols - 1]
curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols)
while curr_offset != 0:
curr_row = offset_to_row(curr_offset, num_cols)
curr_col = offset_to_col(curr_offset, num_cols)
prev_offset = self.backtraces_[curr_row, curr_col]
prev_row = offset_to_row(prev_offset, num_cols)
prev_col = offset_to_col(prev_offset, num_cols)
res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++
res.hyps.appendleft(curr_col - 1)
if curr_row - 1 == prev_row and curr_col == prev_col:
res.codes.appendleft(Code.deletion)
elif curr_row == prev_row and curr_col - 1 == prev_col:
res.codes.appendleft(Code.insertion)
else:
# assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col)
ref_str = refs[res.refs[0]].label
hyp_str = hyps[res.hyps[0]].label
if ref_str == hyp_str:
res.codes.appendleft(Code.match)
else:
res.codes.appendleft(Code.substitution)
confusion_pair = "%s -> %s" % (ref_str, hyp_str)
if confusion_pair not in self.confusion_pairs_:
self.confusion_pairs_[confusion_pair] = 1
else:
self.confusion_pairs_[confusion_pair] += 1
curr_offset = prev_offset
return res
def align(self, refs, hyps):
if len(refs) == 0 and len(hyps) == 0:
return np.nan
# NOTE: we're not resetting the values in these matrices because every value
# will be overridden in the loop below. If this assumption doesn't hold,
# be sure to set all entries in self.scores_ and self.backtraces_ to 0.
self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1))
self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1))
num_rows, num_cols = self.scores_.shape
for i in range(num_rows):
for j in range(num_cols):
if i == 0 and j == 0:
self.scores_[i, j] = 0.0
self.backtraces_[i, j] = 0
continue
if i == 0:
self.scores_[i, j] = self.scores_[i, j - 1] + self.cost(
None, hyps[j - 1], Code.insertion
)
self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols)
continue
if j == 0:
self.scores_[i, j] = self.scores_[i - 1, j] + self.cost(
refs[i - 1], None, Code.deletion
)
self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols)
continue
# Below here both i and j are greater than 0
ref = refs[i - 1]
hyp = hyps[j - 1]
best_score = self.scores_[i - 1, j - 1] + (
self.cost(ref, hyp, Code.match)
if (ref.label == hyp.label)
else self.cost(ref, hyp, Code.substitution)
)
prev_row = i - 1
prev_col = j - 1
ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion)
if ins < best_score:
best_score = ins
prev_row = i
prev_col = j - 1
delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion)
if delt < best_score:
best_score = delt
prev_row = i - 1
prev_col = j
self.scores_[i, j] = best_score
self.backtraces_[i, j] = coordinate_to_offset(
prev_row, prev_col, num_cols
)
return self.get_result(refs, hyps)
class WERTransformer(object):
def __init__(self, hyp_str, ref_str, verbose=True):
self.ed_ = EditDistance(False)
self.id2oracle_errs_ = {}
self.utts_ = 0
self.words_ = 0
self.insertions_ = 0
self.deletions_ = 0
self.substitutions_ = 0
self.process(["dummy_str", hyp_str, ref_str])
if verbose:
print("'%s' vs '%s'" % (hyp_str, ref_str))
self.report_result()
def process(self, input): # std::vector<std::string>&& input
if len(input) < 3:
print(
"Input must be of the form <id> ... <hypo> <ref> , got ",
len(input),
" inputs:",
)
return None
# Align
# std::vector<Token> hyps;
# std::vector<Token> refs;
hyps = str2toks(input[-2])
refs = str2toks(input[-1])
alignment = self.ed_.align(refs, hyps)
if alignment is None:
print("Alignment is null")
return np.nan
# Tally errors
ins = 0
dels = 0
subs = 0
for code in alignment.codes:
if code == Code.substitution:
subs += 1
elif code == Code.insertion:
ins += 1
elif code == Code.deletion:
dels += 1
# Output
row = input
row.append(str(len(refs)))
row.append(str(ins))
row.append(str(dels))
row.append(str(subs))
# print(row)
# Accumulate
kIdIndex = 0
kNBestSep = "/"
pieces = input[kIdIndex].split(kNBestSep)
if len(pieces) == 0:
print(
"Error splitting ",
input[kIdIndex],
" on '",
kNBestSep,
"', got empty list",
)
return np.nan
id = pieces[0]
if id not in self.id2oracle_errs_:
self.utts_ += 1
self.words_ += len(refs)
self.insertions_ += ins
self.deletions_ += dels
self.substitutions_ += subs
self.id2oracle_errs_[id] = [ins, dels, subs]
else:
curr_err = ins + dels + subs
prev_err = np.sum(self.id2oracle_errs_[id])
if curr_err < prev_err:
self.id2oracle_errs_[id] = [ins, dels, subs]
return 0
def report_result(self):
# print("---------- Summary ---------------")
if self.words_ == 0:
print("No words counted")
return
# 1-best
best_wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
print(
"\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, "
"%0.2f%% dels, %0.2f%% subs)"
% (
best_wer,
self.utts_,
self.words_,
100.0 * self.insertions_ / self.words_,
100.0 * self.deletions_ / self.words_,
100.0 * self.substitutions_ / self.words_,
)
)
def wer(self):
if self.words_ == 0:
wer = np.nan
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
return wer
def stats(self):
if self.words_ == 0:
stats = {}
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
stats = dict(
{
"wer": wer,
"utts": self.utts_,
"numwords": self.words_,
"ins": self.insertions_,
"dels": self.deletions_,
"subs": self.substitutions_,
"confusion_pairs": self.ed_.confusion_pairs_,
}
)
return stats
def calc_wer(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.wer()
def calc_wer_stats(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.stats()
def get_wer_alignment_codes(hyp_str, ref_str):
"""
INPUT: hypothesis string, reference string
OUTPUT: List of alignment codes (intermediate results from WER computation)
"""
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes
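# Illustrative example (added comment, not part of the original code):
# calc_wer("a b c d", "a x c") scores hypothesis "a b c d" against reference
# "a x c": one substitution (x vs. b) plus one insertion (d) over 3 reference
# words, i.e. a WER of roughly 66.67%.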
def merge_counts(x, y):
# Merge two hashes which have 'counts' as their values
# This can be used for example to merge confusion pair counts
# conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs'])
for k, v in y.items():
if k not in x:
x[k] = 0
x[k] += v
return x
| 11,842 | 30.002618 | 86 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/data/collaters.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains collection of classes which implement
collate functionalities for various tasks.
Collaters should know what data to expect for each sample
and they should pack / collate them into batches
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
class Seq2SeqCollater(object):
"""
Implements collate function mainly for seq2seq tasks
This expects each sample to contain feature (src_tokens) and
targets.
This collator is also used for aligned training task.
"""
def __init__(
self,
feature_index=0,
label_index=1,
pad_index=1,
eos_index=2,
move_eos_to_beginning=True,
):
self.feature_index = feature_index
self.label_index = label_index
self.pad_index = pad_index
self.eos_index = eos_index
self.move_eos_to_beginning = move_eos_to_beginning
def _collate_frames(self, frames):
"""Convert a list of 2d frames into a padded 3d tensor
Args:
frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
len_max = max(frame.size(0) for frame in frames)
f_dim = frames[0].size(1)
res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
for i, v in enumerate(frames):
res[i, : v.size(0)] = v
return res
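    # Illustrative note (added comment, not part of the original code): two
    # frames of shape (3, 40) and (5, 40) are collated into a (2, 5, 40)
    # tensor, with the shorter item zero-padded along the time axis.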
def collate(self, samples):
"""
utility function to collate samples into batch for speech recognition.
"""
if len(samples) == 0:
return {}
# parse samples into torch tensors
parsed_samples = []
for s in samples:
# skip invalid samples
if s["data"][self.feature_index] is None:
continue
source = s["data"][self.feature_index]
if isinstance(source, (np.ndarray, np.generic)):
source = torch.from_numpy(source)
target = s["data"][self.label_index]
if isinstance(target, (np.ndarray, np.generic)):
target = torch.from_numpy(target).long()
elif isinstance(target, list):
target = torch.LongTensor(target)
parsed_sample = {"id": s["id"], "source": source, "target": target}
parsed_samples.append(parsed_sample)
samples = parsed_samples
id = torch.LongTensor([s["id"] for s in samples])
frames = self._collate_frames([s["source"] for s in samples])
# sort samples by descending number of frames
frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
frames_lengths, sort_order = frames_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
frames = frames.index_select(0, sort_order)
target = None
target_lengths = None
prev_output_tokens = None
if samples[0].get("target", None) is not None:
ntokens = sum(len(s["target"]) for s in samples)
target = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, sort_order)
target_lengths = torch.LongTensor(
[s["target"].size(0) for s in samples]
).index_select(0, sort_order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=self.move_eos_to_beginning,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
"target": target,
"target_lengths": target_lengths,
"nsentences": len(samples),
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
| 4,812 | 35.462121 | 84 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/data/replabels.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Replabel transforms for use with wav2letter's ASG criterion.
"""
def replabel_symbol(i):
"""
Replabel symbols used in wav2letter, currently just "1", "2", ...
This prevents training with numeral tokens, so this might change in the future
"""
return str(i)
def pack_replabels(tokens, dictionary, max_reps):
"""
Pack a token sequence so that repeated symbols are replaced by replabels
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_value_to_idx = [0] * (max_reps + 1)
for i in range(1, max_reps + 1):
replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i))
result = []
prev_token = -1
num_reps = 0
for token in tokens:
if token == prev_token and num_reps < max_reps:
num_reps += 1
else:
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
num_reps = 0
result.append(token)
prev_token = token
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
return result
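# Illustrative example (added comment, not part of the original code): with
# max_reps = 2 and a dictionary containing replabel_symbol(1) and
# replabel_symbol(2), the token sequence [5, 5, 5, 7] packs to
# [5, dictionary.index("2"), 7]; unpack_replabels below reverses the mapping.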
def unpack_replabels(tokens, dictionary, max_reps):
"""
Unpack a token sequence so that replabels are replaced by repeated symbols
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_idx_to_value = {}
for i in range(1, max_reps + 1):
replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i
result = []
prev_token = -1
for token in tokens:
try:
for _ in range(replabel_idx_to_value[token]):
result.append(prev_token)
prev_token = -1
except KeyError:
result.append(token)
prev_token = token
return result
| 1,970 | 26.760563 | 82 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/data/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def calc_mean_invstddev(feature):
if len(feature.size()) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = feature.mean(0)
var = feature.var(0)
# avoid division by ~zero
eps = 1e-8
if (var < eps).any():
return mean, 1.0 / (torch.sqrt(var) + eps)
return mean, 1.0 / torch.sqrt(var)
def apply_mv_norm(features):
# If there is less than 2 spectrograms, the variance cannot be computed (is NaN)
# and normalization is not possible, so return the item as it is
if features.size(0) < 2:
return features
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
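# Illustrative note (added comment, not part of the original code): for a
# (T, feat_dim) spectrogram, apply_mv_norm standardizes each feature dimension
# to roughly zero mean and unit variance; inputs with fewer than 2 frames are
# returned unchanged because the variance is undefined.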
def lengths_to_encoder_padding_mask(lengths, batch_first=False):
"""
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
Return:
max_length: maximum length of B sequences
encoder_padding_mask: a (max_length, B) binary mask, where
[t, b] = 0 for t < lengths[b] and 1 otherwise
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
    # torch.arange(max_lengths): a (T, )-shaped tensor with [0, ..., T-1],
    # moved to the right device, reshaped to (1, T), expanded to (B, T), and
    # then compared against lengths broadcast to (B, T)
    encoder_padding_mask = (
        torch.arange(max_lengths)
        .to(lengths.device)
        .view(1, max_lengths)
        .expand(bsz, -1)
        >= lengths.view(bsz, 1).expand(-1, max_lengths)
    )
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
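# Illustrative example (added comment, not part of the original code): for
# lengths = [2, 3] and batch_first=False, the function returns the (3, 2) mask
#     [[0, 0],
#      [0, 0],
#      [1, 0]]
# together with max_length 3; only the padded third frame of the first
# sequence is masked.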
def encoder_padding_mask_to_lengths(
encoder_padding_mask, max_lengths, batch_size, device
):
"""
convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor
    Conventionally, encoder output contains an encoder_padding_mask, which is
    a 2-D mask of shape (T, B), whose (t, b) element indicates whether
    encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we
need to convert this mask tensor to a 1-D tensor in shape (B, ), where
[b] denotes the valid length of b-th sequence
Args:
encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,
indicating all are valid
Return:
seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the
number of valid elements of b-th sequence
max_lengths: maximum length of all sequence, if encoder_padding_mask is
not None, max_lengths must equal to encoder_padding_mask.size(0)
batch_size: batch size; if encoder_padding_mask is
not None, max_lengths must equal to encoder_padding_mask.size(1)
device: which device to put the result on
"""
if encoder_padding_mask is None:
return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)
assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match"
assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match"
return max_lengths - torch.sum(encoder_padding_mask, dim=0)
| 3,429 | 32.960396 | 84 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/data/asr_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from fairseq.data import FairseqDataset
from . import data_utils
from .collaters import Seq2SeqCollater
class AsrDataset(FairseqDataset):
"""
A dataset representing speech and corresponding transcription.
Args:
aud_paths: (List[str]): A list of str with paths to audio files.
aud_durations_ms (List[int]): A list of int containing the durations of
audio files.
tgt (List[torch.LongTensor]): A list of LongTensors containing the indices
of target transcriptions.
tgt_dict (~fairseq.data.Dictionary): target vocabulary.
ids (List[str]): A list of utterance IDs.
speakers (List[str]): A list of speakers corresponding to utterances.
num_mel_bins (int): Number of triangular mel-frequency bins (default: 80)
frame_length (float): Frame length in milliseconds (default: 25.0)
frame_shift (float): Frame shift in milliseconds (default: 10.0)
"""
def __init__(
self, aud_paths, aud_durations_ms, tgt,
tgt_dict, ids, speakers,
num_mel_bins=80, frame_length=25.0, frame_shift=10.0
):
assert frame_length > 0
assert frame_shift > 0
assert all(x > frame_length for x in aud_durations_ms)
self.frame_sizes = [
int(1 + (d - frame_length) / frame_shift)
for d in aud_durations_ms
]
assert len(aud_paths) > 0
assert len(aud_paths) == len(aud_durations_ms)
assert len(aud_paths) == len(tgt)
assert len(aud_paths) == len(ids)
assert len(aud_paths) == len(speakers)
self.aud_paths = aud_paths
self.tgt_dict = tgt_dict
self.tgt = tgt
self.ids = ids
self.speakers = speakers
self.num_mel_bins = num_mel_bins
self.frame_length = frame_length
self.frame_shift = frame_shift
self.s2s_collater = Seq2SeqCollater(
0, 1, pad_index=self.tgt_dict.pad(),
eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True
)
def __getitem__(self, index):
import torchaudio
import torchaudio.compliance.kaldi as kaldi
tgt_item = self.tgt[index] if self.tgt is not None else None
path = self.aud_paths[index]
if not os.path.exists(path):
raise FileNotFoundError("Audio file not found: {}".format(path))
sound, sample_rate = torchaudio.load_wav(path)
output = kaldi.fbank(
sound,
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
frame_shift=self.frame_shift
)
output_cmvn = data_utils.apply_mv_norm(output)
return {"id": index, "data": [output_cmvn.detach(), tgt_item]}
def __len__(self):
return len(self.aud_paths)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[int]): sample indices to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
return self.s2s_collater.collate(samples)
def num_tokens(self, index):
return self.frame_sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.frame_sizes[index],
len(self.tgt[index]) if self.tgt is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self))
| 3,870 | 33.5625 | 82 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/data/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .asr_dataset import AsrDataset
__all__ = [
'AsrDataset',
]
| 247 | 21.545455 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/tasks/speech_recognition.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import re
import sys
import torch
from fairseq.data import Dictionary
from fairseq.tasks import FairseqTask, register_task
from examples.speech_recognition.data import AsrDataset
from examples.speech_recognition.data.replabels import replabel_symbol
def get_asr_dataset_from_json(data_json_path, tgt_dict):
"""
Parse data json and create dataset.
See scripts/asr_prep_json.py which pack json from raw files
Json example:
{
"utts": {
"4771-29403-0025": {
"input": {
"length_ms": 170,
"path": "/tmp/file1.flac"
},
"output": {
"text": "HELLO \n",
"token": "HE LLO",
"tokenid": "4815, 861"
}
},
"1564-142299-0096": {
...
}
}
"""
if not os.path.isfile(data_json_path):
raise FileNotFoundError("Dataset not found: {}".format(data_json_path))
with open(data_json_path, "rb") as f:
data_samples = json.load(f)["utts"]
assert len(data_samples) != 0
sorted_samples = sorted(
data_samples.items(),
key=lambda sample: int(sample[1]["input"]["length_ms"]),
reverse=True,
)
aud_paths = [s[1]["input"]["path"] for s in sorted_samples]
ids = [s[0] for s in sorted_samples]
speakers = []
for s in sorted_samples:
m = re.search("(.+?)-(.+?)-(.+?)", s[0])
speakers.append(m.group(1) + "_" + m.group(2))
frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples]
tgt = [
[int(i) for i in s[1]["output"]["tokenid"].split(", ")]
for s in sorted_samples
]
# append eos
tgt = [[*t, tgt_dict.eos()] for t in tgt]
return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers)
@register_task("speech_recognition")
class SpeechRecognitionTask(FairseqTask):
"""
Task for training speech recognition model.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="path to data directory")
parser.add_argument(
"--silence-token", default="\u2581", help="token for silence (used by w2l)"
)
parser.add_argument('--max-source-positions', default=sys.maxsize, type=int, metavar='N',
help='max number of frames in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries)."""
dict_path = os.path.join(args.data, "dict.txt")
if not os.path.isfile(dict_path):
raise FileNotFoundError("Dict not found: {}".format(dict_path))
tgt_dict = Dictionary.load(dict_path)
if args.criterion == "ctc_loss":
tgt_dict.add_symbol("<ctc_blank>")
elif args.criterion == "asg_loss":
for i in range(1, args.max_replabel + 1):
tgt_dict.add_symbol(replabel_symbol(i))
print("| dictionary: {} types".format(len(tgt_dict)))
return cls(args, tgt_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
data_json_path = os.path.join(self.args.data, "{}.json".format(split))
self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)
def build_generator(self, models, args):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, self.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, self.target_dictionary)
else:
return super().build_generator(models, args)
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.tgt_dict
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
return None
def max_positions(self):
"""Return the max speech and sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
| 5,094 | 34.381944 | 97 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/speech_recognition/tasks/__init__.py
|
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
task_name = file[:file.find('.py')]
importlib.import_module('examples.speech_recognition.tasks.' + task_name)
| 263 | 32 | 81 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/byte_level_bpe/get_bitext.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path as op
import argparse
import os
from multiprocessing import cpu_count
from collections import namedtuple
from typing import Optional, List
import sentencepiece as sp
from fairseq.data.encoders.moses_tokenizer import MosesTokenizer
from fairseq.data.encoders.byte_utils import byte_encode
from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE
from fairseq.data.encoders.characters import Characters
from fairseq.data.encoders.byte_bpe import ByteBPE
from fairseq.data.encoders.bytes import Bytes
SPLITS = ['train', 'valid', 'test']
def _convert_xml(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, 'w') as f_o:
for s in f:
ss = s.strip()
if not ss.startswith('<seg'):
continue
ss = ss.replace('</seg>', '').split('">')
assert len(ss) == 2
f_o.write(ss[1].strip() + '\n')
def _convert_train(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, 'w') as f_o:
for s in f:
ss = s.strip()
if ss.startswith('<'):
continue
f_o.write(ss.strip() + '\n')
def _get_bytes(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, 'w') as f_o:
for s in f:
f_o.write(Bytes.encode(s.strip()) + '\n')
def _get_chars(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, 'w') as f_o:
for s in f:
f_o.write(Characters.encode(s.strip()) + '\n')
def pretokenize(in_path: str, out_path: str, src: str, tgt: str):
Args = namedtuple('Args', ['moses_source_lang', 'moses_target_lang',
'moses_no_dash_splits', 'moses_no_escape'])
args = Args(moses_source_lang=src, moses_target_lang=tgt,
moses_no_dash_splits=False, moses_no_escape=False)
pretokenizer = MosesTokenizer(args)
with open(in_path) as f, open(out_path, 'w') as f_o:
for s in f:
f_o.write(pretokenizer.encode(s.strip()) + '\n')
def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str):
with open(out_path, 'w') as f_o:
for lang in [src, tgt]:
with open(f'{in_path_prefix}.{lang}') as f:
for s in f:
f_o.write(byte_encode(s.strip()) + '\n')
def _get_bpe(in_path: str, model_prefix: str, vocab_size: int):
arguments = [
f'--input={in_path}', f'--model_prefix={model_prefix}',
f'--model_type=bpe', f'--vocab_size={vocab_size}',
'--character_coverage=1.0', '--normalization_rule_name=identity',
f'--num_threads={cpu_count()}'
]
sp.SentencePieceTrainer.Train(' '.join(arguments))
def _apply_bbpe(model_path: str, in_path: str, out_path: str):
Args = namedtuple('Args', ['sentencepiece_model_path'])
args = Args(sentencepiece_model_path=model_path)
tokenizer = ByteBPE(args)
with open(in_path) as f, open(out_path, 'w') as f_o:
for s in f:
f_o.write(tokenizer.encode(s.strip()) + '\n')
def _apply_bpe(model_path: str, in_path: str, out_path: str):
Args = namedtuple('Args', ['sentencepiece_model'])
args = Args(sentencepiece_model=model_path)
tokenizer = SentencepieceBPE(args)
with open(in_path) as f, open(out_path, 'w') as f_o:
for s in f:
f_o.write(tokenizer.encode(s.strip()) + '\n')
def _concat_files(in_paths: List[str], out_path: str):
with open(out_path, 'w') as f_o:
for p in in_paths:
with open(p) as f:
for r in f:
f_o.write(r)
def preprocess_iwslt17(root: str, src: str, tgt: str, bpe_size: Optional[int],
need_chars: bool, bbpe_size: Optional[int],
need_bytes: bool):
# extract bitext
in_root = op.join(root, f'{src}-{tgt}')
for lang in [src, tgt]:
_convert_train(
op.join(in_root, f'train.tags.{src}-{tgt}.{lang}'),
op.join(root, f'train.{lang}')
)
_convert_xml(
op.join(in_root, f'IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml'),
op.join(root, f'valid.{lang}')
)
_convert_xml(
op.join(in_root, f'IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml'),
op.join(root, f'test.{lang}')
)
# pre-tokenize
for lang in [src, tgt]:
for split in SPLITS:
pretokenize(op.join(root, f'{split}.{lang}'),
op.join(root, f'{split}.moses.{lang}'), src, tgt)
# tokenize with BPE vocabulary
if bpe_size is not None:
# learn vocabulary
concated_train_path = op.join(root, 'train.all')
_concat_files(
            [op.join(root, f'train.moses.{src}'),
             op.join(root, f'train.moses.{tgt}')],
concated_train_path
)
bpe_model_prefix = op.join(root, f'spm_bpe{bpe_size}')
_get_bpe(concated_train_path, bpe_model_prefix, bpe_size)
os.remove(concated_train_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bpe(
bpe_model_prefix + '.model',
op.join(root, f'{split}.moses.{lang}'),
op.join(root, f'{split}.moses.bpe{bpe_size}.{lang}')
)
# tokenize with bytes vocabulary
if need_bytes:
for lang in [src, tgt]:
for split in SPLITS:
_get_bytes(op.join(root, f'{split}.moses.{lang}'),
op.join(root, f'{split}.moses.bytes.{lang}'))
# tokenize with characters vocabulary
if need_chars:
for lang in [src, tgt]:
for split in SPLITS:
_get_chars(op.join(root, f'{split}.moses.{lang}'),
op.join(root, f'{split}.moses.chars.{lang}'))
# tokenize with byte-level BPE vocabulary
if bbpe_size is not None:
# learn vocabulary
bchar_path = op.join(root, 'train.bchar')
_convert_to_bchar(op.join(root, 'train.moses'), src, tgt, bchar_path)
bbpe_model_prefix = op.join(root, f'spm_bbpe{bbpe_size}')
_get_bpe(bchar_path, bbpe_model_prefix, bbpe_size)
os.remove(bchar_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bbpe(
bbpe_model_prefix + '.model',
op.join(root, f'{split}.moses.{lang}'),
op.join(root, f'{split}.moses.bbpe{bbpe_size}.{lang}')
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='data')
parser.add_argument('--bpe-vocab', default=None, type=int,
                        help='Generate tokenized bitext with BPE of size K. '
                             'Defaults to None (disabled).')
parser.add_argument('--bbpe-vocab', default=None, type=int,
                        help='Generate tokenized bitext with BBPE of size K. '
                             'Defaults to None (disabled).')
parser.add_argument('--byte-vocab', action='store_true',
help='Generate tokenized bitext with bytes vocabulary')
parser.add_argument('--char-vocab', action='store_true',
help='Generate tokenized bitext with chars vocabulary')
args = parser.parse_args()
preprocess_iwslt17(args.root, 'fr', 'en', args.bpe_vocab, args.char_vocab,
args.bbpe_vocab, args.byte_vocab)
if __name__ == '__main__':
main()
| 7,743 | 36.410628 | 79 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/byte_level_bpe/gru_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerModel, TransformerEncoder
@register_model("gru_transformer")
class GRUTransformerModel(TransformerModel):
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return GRUTransformerEncoder(args, src_dict, embed_tokens)
class GRUTransformerEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
self.emb_ctx = nn.GRU(input_size=embed_tokens.embedding_dim,
hidden_size=embed_tokens.embedding_dim // 2,
num_layers=1, bidirectional=True)
def forward_embedding(self, src_tokens):
# embed tokens and positions
x = embed = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
# contextualize embeddings
x = x.transpose(0, 1)
x = self.dropout_module(x)
x, _ = self.emb_ctx.forward(x)
x = x.transpose(0, 1)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
@register_model_architecture("gru_transformer", "gru_transformer")
def gru_transformer_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.layer_wise_attention = getattr(args, "layer_wise_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
@register_model_architecture("gru_transformer", "gru_transformer_big")
def gru_transformer_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
gru_transformer_base_architecture(args)
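# Added note (hedged, not part of the original module): the contextualizing GRU
# above is bidirectional with hidden_size = embedding_dim // 2, so the
# concatenated forward/backward outputs have width embedding_dim again and can
# replace the token embeddings in place. A minimal, self-contained shape check
# (assumes an even embedding dimension; never called by fairseq itself):
def _check_gru_ctx_shapes(embed_dim: int = 512, seq_len: int = 7, bsz: int = 3):
    import torch
    gru = nn.GRU(input_size=embed_dim, hidden_size=embed_dim // 2,
                 num_layers=1, bidirectional=True)
    x = torch.zeros(seq_len, bsz, embed_dim)  # (seq_len, batch, embed_dim)
    y, _ = gru(x)
    assert y.shape == x.shape  # forward/backward halves concatenate back to embed_dim
# With the registrations above, these models would be selected in fairseq
# training via "--arch gru_transformer" or "--arch gru_transformer_big".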
| 5,028 | 46.895238 | 87 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/simultaneous_translation/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import criterions, models, eval # noqa
| 225 | 31.285714 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/simultaneous_translation/modules/monotonic_multihead_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
from fairseq import utils
from fairseq.modules import MultiheadAttention
from examples.simultaneous_translation.utils.functions import (
exclusive_cumprod,
lengths_to_mask
)
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.utils import convert_padding_direction
from . import register_monotonic_attention
@with_incremental_state
class MonotonicAttention(nn.Module):
"""
Abstract class of monotonic attentions
"""
def __init__(self, args):
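        # Added note: this abstract __init__ intentionally skips super().__init__();
        # concrete subclasses run MultiheadAttention.__init__ first, so nn.Module is
        # already initialized when the nn.Parameter below is registered.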
self.eps = args.attention_eps
self.mass_preservation = args.mass_preservation
self.noise_mean = args.noise_mean
self.noise_var = args.noise_var
self.energy_bias_init = args.energy_bias_init
self.energy_bias = (
nn.Parameter(self.energy_bias_init * torch.ones([1]))
if args.energy_bias is True else 0
)
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--no-mass-preservation', action="store_false", dest="mass_preservation",
help='Do not stay on the last token when decoding')
parser.add_argument('--mass-preservation', action="store_true", dest="mass_preservation",
help='Stay on the last token when decoding')
parser.set_defaults(mass_preservation=True)
parser.add_argument('--noise-var', type=float, default=1.0,
                            help='Variance of discreteness noise')
parser.add_argument('--noise-mean', type=float, default=0.0,
                            help='Mean of discreteness noise')
parser.add_argument('--energy-bias', action="store_true", default=False,
help='Bias for energy')
parser.add_argument('--energy-bias-init', type=float, default=-2.0,
help='Initial value of the bias for energy')
parser.add_argument('--attention-eps', type=float, default=1e-6,
help='Epsilon when calculating expected attention')
# fmt: on
def p_choose(self, *args):
raise NotImplementedError
def input_projections(self, *args):
raise NotImplementedError
def attn_energy(self, q_proj, k_proj, key_padding_mask=None):
"""
Calculating monotonic energies
============================================================
Expected input size
q_proj: bsz * num_heads, tgt_len, self.head_dim
k_proj: bsz * num_heads, src_len, self.head_dim
key_padding_mask: bsz, src_len
attn_mask: tgt_len, src_len
"""
bsz, tgt_len, embed_dim = q_proj.size()
bsz = bsz // self.num_heads
src_len = k_proj.size(1)
attn_energy = torch.bmm(q_proj, k_proj.transpose(1, 2)) + self.energy_bias
attn_energy = attn_energy.view(bsz, self.num_heads, tgt_len, src_len)
if key_padding_mask is not None:
attn_energy = attn_energy.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).bool(),
float('-inf'),
)
return attn_energy
def expected_alignment_train(self, p_choose, key_padding_mask):
"""
Calculating expected alignment for MMA
        Mask is not needed because p_choose will be 0 if masked
        q_{i,j} = (1 − p_{i,j−1}) q_{i,j−1} + a_{i−1,j}
        a_{i,j} = p_{i,j} q_{i,j}
        Parallel (closed-form) solution:
        a_i = p_i * exclusive_cumprod(1 − p_i) * cumsum(a_{i−1} / exclusive_cumprod(1 − p_i))
============================================================
Expected input size
p_choose: bsz * num_heads, tgt_len, src_len
"""
# p_choose: bsz * num_heads, tgt_len, src_len
bsz_num_heads, tgt_len, src_len = p_choose.size()
# cumprod_1mp : bsz * num_heads, tgt_len, src_len
cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=self.eps)
cumprod_1mp_clamp = torch.clamp(cumprod_1mp, self.eps, 1.0)
init_attention = p_choose.new_zeros([bsz_num_heads, 1, src_len])
init_attention[:, :, 0] = 1.0
previous_attn = [init_attention]
for i in range(tgt_len):
# p_choose: bsz * num_heads, tgt_len, src_len
# cumprod_1mp_clamp : bsz * num_heads, tgt_len, src_len
# previous_attn[i]: bsz * num_heads, 1, src_len
# alpha_i: bsz * num_heads, src_len
alpha_i = (
p_choose[:, i]
* cumprod_1mp[:, i]
* torch.cumsum(
previous_attn[i][:, 0] / cumprod_1mp_clamp[:, i],
dim=1
)
).clamp(0, 1.0)
previous_attn.append(alpha_i.unsqueeze(1))
# alpha: bsz * num_heads, tgt_len, src_len
alpha = torch.cat(previous_attn[1:], dim=1)
if self.mass_preservation:
# Last token has the residual probabilities
alpha[:, :, -1] = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0.0, 1.0)
assert not torch.isnan(alpha).any(), "NaN detected in alpha."
return alpha
def expected_alignment_infer(self, p_choose, key_padding_mask, incremental_state):
"""
        Calculating monotonic alignment for MMA during inference time
============================================================
Expected input size
p_choose: bsz * num_heads, tgt_len, src_len
key_padding_mask: bsz * src_len
incremental_state: dict
"""
# p_choose: bsz * self.num_heads, src_len
bsz_num_heads, tgt_len, src_len = p_choose.size()
# One token at a time
assert tgt_len == 1
p_choose = p_choose[:, 0, :]
monotonic_cache = self._get_monotonic_buffer(incremental_state)
# prev_monotonic_step: bsz, num_heads
bsz = bsz_num_heads // self.num_heads
prev_monotonic_step = monotonic_cache.get(
"step",
p_choose.new_zeros([bsz, self.num_heads]).long()
)
bsz, num_heads = prev_monotonic_step.size()
assert num_heads == self.num_heads
assert bsz * num_heads == bsz_num_heads
# p_choose: bsz, num_heads, src_len
p_choose = p_choose.view(bsz, num_heads, src_len)
if key_padding_mask is not None:
src_lengths = src_len - \
key_padding_mask.sum(dim=1, keepdim=True).long()
else:
src_lengths = prev_monotonic_step.new_ones(bsz, 1) * src_len
# src_lengths: bsz, num_heads
src_lengths = src_lengths.expand_as(prev_monotonic_step)
# new_monotonic_step: bsz, num_heads
new_monotonic_step = prev_monotonic_step
step_offset = 0
if key_padding_mask is not None:
if key_padding_mask[:, 0].any():
# left_pad_source = True:
step_offset = key_padding_mask.sum(dim=-1, keepdim=True)
max_steps = (
src_lengths - 1 if self.mass_preservation
else src_lengths
)
# finish_read: bsz, num_heads
finish_read = new_monotonic_step.eq(max_steps)
while finish_read.sum().item() < bsz * self.num_heads:
# p_choose: bsz * self.num_heads, src_len
# only choose the p at monotonic steps
# p_choose_i: bsz , self.num_heads
p_choose_i = (
p_choose
.gather(
2,
(step_offset + new_monotonic_step).unsqueeze(2)
.clamp(0, src_len - 1)
)
).squeeze(2)
action = (
(p_choose_i < 0.5)
.type_as(prev_monotonic_step)
.masked_fill(finish_read, 0)
)
# 1 x bsz
# sample actions on unfinished seq
# 1 means stay, finish reading
# 0 means leave, continue reading
# dist = torch.distributions.bernoulli.Bernoulli(p_choose)
# action = dist.sample().type_as(finish_read) * (1 - finish_read)
new_monotonic_step += action
finish_read = new_monotonic_step.eq(max_steps) | (action == 0)
# finish_read = (~ (finish_read.sum(dim=1, keepdim=True) < self.num_heads / 2)) | finish_read
monotonic_cache["step"] = new_monotonic_step
# alpha: bsz * num_heads, 1, src_len
# new_monotonic_step: bsz, num_heads
alpha = (
p_choose
.new_zeros([bsz * self.num_heads, src_len])
.scatter(
1,
(step_offset + new_monotonic_step).view(bsz *
self.num_heads, 1).clamp(0, src_len - 1),
1
)
)
if not self.mass_preservation:
alpha = alpha.masked_fill(
(new_monotonic_step == max_steps).view(bsz * self.num_heads, 1),
0
)
alpha = alpha.unsqueeze(1)
self._set_monotonic_buffer(incremental_state, monotonic_cache)
return alpha
def v_proj_output(self, value):
raise NotImplementedError
def forward(
self, query, key, value,
key_padding_mask=None, incremental_state=None, *args, **kwargs,
):
tgt_len, bsz, embed_dim = query.size()
src_len = value.size(0)
# stepwise prob
# p_choose: bsz * self.num_heads, tgt_len, src_len
p_choose = self.p_choose(query, key, key_padding_mask)
# expected alignment alpha
# bsz * self.num_heads, tgt_len, src_len
if incremental_state is not None:
alpha = self.expected_alignment_infer(p_choose, key_padding_mask, incremental_state)
else:
alpha = self.expected_alignment_train(p_choose, key_padding_mask)
# expected attention beta
# bsz * self.num_heads, tgt_len, src_len
beta = self.expected_attention(alpha, query, key, value, key_padding_mask, incremental_state)
attn_weights = beta
v_proj = self.v_proj_output(value)
attn = torch.bmm(attn_weights.type_as(v_proj), v_proj)
attn = (
attn
.transpose(0, 1)
.contiguous()
.view(tgt_len, bsz, embed_dim)
)
attn = self.out_proj(attn)
beta = beta.view(bsz, self.num_heads, tgt_len, src_len)
alpha = alpha.view(bsz, self.num_heads, tgt_len, src_len)
p_choose = p_choose.view(bsz, self.num_heads, tgt_len, src_len)
return attn, {"alpha": alpha, "beta": beta, "p_choose": p_choose}
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
super().reorder_incremental_state(incremental_state, new_order)
input_buffer = self._get_monotonic_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_monotonic_buffer(incremental_state, input_buffer)
def _get_monotonic_buffer(self, incremental_state):
return utils.get_incremental_state(
self,
incremental_state,
'monotonic',
) or {}
def _set_monotonic_buffer(self, incremental_state, buffer):
utils.set_incremental_state(
self,
incremental_state,
'monotonic',
buffer,
)
def get_pointer(self, incremental_state):
return utils.get_incremental_state(
self,
incremental_state,
'monotonic',
) or {}
def get_fastest_pointer(self, incremental_state):
return self.get_pointer(incremental_state)["step"].max(0)[0]
def set_pointer(self, incremental_state, p_choose):
curr_pointer = self.get_pointer(incremental_state)
if len(curr_pointer) == 0:
buffer = torch.zeros_like(p_choose)
else:
buffer = self.get_pointer(incremental_state)["step"]
buffer += (p_choose < 0.5).type_as(buffer)
utils.set_incremental_state(
self,
incremental_state,
'monotonic',
{"step": buffer},
)
@register_monotonic_attention("hard_aligned")
class MonotonicMultiheadAttentionHard(MonotonicAttention, MultiheadAttention):
def __init__(self, args):
MultiheadAttention.__init__(
self,
embed_dim=args.decoder_embed_dim,
num_heads=args.decoder_attention_heads,
kdim=getattr(args, 'encoder_embed_dim', None),
vdim=getattr(args, 'encoder_embed_dim', None),
dropout=args.attention_dropout,
encoder_decoder_attention=True
)
MonotonicAttention.__init__(self, args)
self.k_in_proj = {"monotonic": self.k_proj}
self.q_in_proj = {"monotonic": self.q_proj}
self.v_in_proj = {"output": self.v_proj}
def input_projections(self, query, key, value, name):
"""
Prepare inputs for multihead attention
============================================================
Expected input size
query: tgt_len, bsz, embed_dim
key: src_len, bsz, embed_dim
value: src_len, bsz, embed_dim
name: monotonic or soft
"""
if query is not None:
bsz = query.size(1)
q = self.q_in_proj[name](query)
q *= self.scaling
q = q.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
else:
q = None
if key is not None:
bsz = key.size(1)
k = self.k_in_proj[name](key)
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
else:
k = None
if value is not None:
bsz = value.size(1)
v = self.v_in_proj[name](value)
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
else:
v = None
return q, k, v
def p_choose(self, query, key, key_padding_mask=None):
"""
Calculating step wise prob for reading and writing
1 to read, 0 to write
============================================================
Expected input size
query: bsz, tgt_len, embed_dim
key: bsz, src_len, embed_dim
value: bsz, src_len, embed_dim
key_padding_mask: bsz, src_len
attn_mask: bsz, src_len
query: bsz, tgt_len, embed_dim
"""
# prepare inputs
q_proj, k_proj, _ = self.input_projections(query, key, None, "monotonic")
# attention energy
attn_energy = self.attn_energy(q_proj, k_proj, key_padding_mask)
noise = 0
if self.training:
            # add noise here to encourage discreteness
noise = (
torch
.normal(self.noise_mean, self.noise_var, attn_energy.size())
.type_as(attn_energy)
.to(attn_energy.device)
)
p_choose = torch.sigmoid(attn_energy + noise)
_, _, tgt_len, src_len = p_choose.size()
# p_choose: bsz * self.num_heads, tgt_len, src_len
return p_choose.view(-1, tgt_len, src_len)
def expected_attention(self, alpha, *args):
'''
For MMA-H, beta = alpha
'''
return alpha
def v_proj_output(self, value):
_, _, v_proj = self.input_projections(None, None, value, "output")
return v_proj
@register_monotonic_attention("infinite_lookback")
class MonotonicMultiheadAttentionInfiniteLookback(MonotonicMultiheadAttentionHard):
def __init__(self, args):
super().__init__(args)
self.init_soft_attention()
def init_soft_attention(self):
self.k_proj_soft = nn.Linear(self.kdim, self.embed_dim, bias=True)
self.q_proj_soft = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
self.k_in_proj["soft"] = self.k_proj_soft
self.q_in_proj["soft"] = self.q_proj_soft
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_in_proj["soft"].weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_in_proj["soft"].weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_in_proj["soft"].weight)
nn.init.xavier_uniform_(self.q_in_proj["soft"].weight)
def expected_attention(self, alpha, query, key, value, key_padding_mask, incremental_state):
        # monotonic attention; compute the MILk (monotonic infinite lookback) soft attention here
bsz_x_num_heads, tgt_len, src_len = alpha.size()
bsz = int(bsz_x_num_heads / self.num_heads)
q, k, _ = self.input_projections(query, key, None, "soft")
soft_energy = self.attn_energy(q, k, key_padding_mask)
assert list(soft_energy.size()) == [bsz, self.num_heads, tgt_len, src_len]
soft_energy = soft_energy.view(bsz * self.num_heads, tgt_len, src_len)
if incremental_state is not None:
monotonic_cache = self._get_monotonic_buffer(incremental_state)
monotonic_step = monotonic_cache["step"] + 1
step_offset = 0
if key_padding_mask is not None:
if key_padding_mask[:, 0].any():
# left_pad_source = True:
step_offset = key_padding_mask.sum(dim=-1, keepdim=True)
monotonic_step += step_offset
mask = lengths_to_mask(
monotonic_step.view(-1), soft_energy.size(2), 1).unsqueeze(1)
soft_energy = soft_energy.masked_fill(~ mask.bool(), float('-inf'))
soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0]
exp_soft_energy = torch.exp(soft_energy)
exp_soft_energy_sum = exp_soft_energy.sum(dim=2)
beta = exp_soft_energy / exp_soft_energy_sum.unsqueeze(2)
else:
# bsz * num_heads, tgt_len, src_len
soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0]
exp_soft_energy = torch.exp(soft_energy)
exp_soft_energy_cumsum = torch.cumsum(exp_soft_energy, dim=2)
if key_padding_mask is not None:
if key_padding_mask.any():
exp_soft_energy_cumsum = (
exp_soft_energy_cumsum.view(-1, self.num_heads, tgt_len, src_len)
.masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(1), self.eps)
.view(-1, tgt_len, src_len)
)
inner_items = alpha / exp_soft_energy_cumsum
beta = exp_soft_energy * torch.cumsum(inner_items.flip(dims=[2]), dim=2).flip(dims=[2])
beta = self.dropout_module(beta)
assert not torch.isnan(beta).any(), "NaN detected in beta."
return beta
@register_monotonic_attention("waitk")
class MonotonicMultiheadAttentionWaitk(MonotonicMultiheadAttentionInfiniteLookback):
def __init__(self, args):
super().__init__(args)
self.q_in_proj["soft"] = self.q_in_proj["monotonic"]
self.k_in_proj["soft"] = self.k_in_proj["monotonic"]
self.waitk_lagging = args.waitk_lagging
        assert self.waitk_lagging > 0, f"Lagging has to be larger than 0, got {self.waitk_lagging}."
@staticmethod
def add_args(parser):
super(
MonotonicMultiheadAttentionWaitk,
MonotonicMultiheadAttentionWaitk,
).add_args(parser)
parser.add_argument('--waitk-lagging', type=int, required=True,
help='Wait k lagging')
def p_choose(self, query, key, key_padding_mask=None, attn_mask=None, incremental_state=None):
"""
query: bsz, tgt_len
key: bsz, src_len
key_padding_mask: bsz, src_len
"""
src_len, bsz, _ = key.size()
tgt_len, bsz, _ = query.size()
p_choose = query.new_ones(bsz, tgt_len, src_len)
p_choose = torch.tril(p_choose, diagonal=self.waitk_lagging - 1)
p_choose = torch.triu(p_choose, diagonal=self.waitk_lagging - 1)
if key_padding_mask is not None and key_padding_mask[:, 0].eq(1).any():
# Left pad source
# add -1 to the end
p_choose = p_choose.masked_fill(key_padding_mask.float().flip(1).unsqueeze(1).bool(), -1)
p_choose = convert_padding_direction(p_choose.view(-1, src_len).long(), padding_idx=-1, right_to_left=True)
p_choose = p_choose.view(bsz, tgt_len, src_len).type_as(query)
# remove -1
p_choose[p_choose.eq(-1)] = 0
# Extend to each head
p_choose = (
p_choose.contiguous().unsqueeze(1)
.expand(-1, self.num_heads, -1, -1).contiguous()
.view(-1, tgt_len, src_len)
)
return p_choose
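# Added sketch (hedged; not part of the original module and never called by it):
# a tiny numerical check that the parallel formula used in
# expected_alignment_train,
#   a_i = p_i * exclusive_cumprod(1 - p_i) * cumsum(a_{i-1} / exclusive_cumprod(1 - p_i)),
# matches the naive per-position recurrence
#   q_{i,j} = (1 - p_{i,j-1}) * q_{i,j-1} + a_{i-1,j},   a_{i,j} = p_{i,j} * q_{i,j}.
def _check_expected_alignment_formula(tgt_len: int = 3, src_len: int = 5, seed: int = 0):
    torch.manual_seed(seed)
    p = torch.rand(tgt_len, src_len)
    def excl_cumprod(x):
        # exclusive cumulative product along the source dimension
        ones = x.new_ones(x.size(0), 1)
        return torch.cat([ones, torch.cumprod(x, dim=1)[:, :-1]], dim=1)
    init = torch.zeros(src_len)
    init[0] = 1.0
    # naive recurrence
    naive, prev = [], init
    for i in range(tgt_len):
        q = torch.zeros(src_len)
        for j in range(src_len):
            stay = (1 - p[i, j - 1]) * q[j - 1] if j > 0 else 0.0
            q[j] = stay + prev[j]
        a_i = p[i] * q
        naive.append(a_i)
        prev = a_i
    # parallel closed form (as in expected_alignment_train, without clamping)
    cp = excl_cumprod(1 - p)
    parallel, prev = [], init
    for i in range(tgt_len):
        a_i = p[i] * cp[i] * torch.cumsum(prev / cp[i], dim=0)
        parallel.append(a_i)
        prev = a_i
    assert torch.allclose(torch.stack(naive), torch.stack(parallel), atol=1e-5)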
| 21,349 | 35.125212 | 119 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.modules import (
LayerNorm,
TransformerEncoderLayer,
TransformerDecoderLayer
)
from . import build_monotonic_attention
class TransformerMonotonicEncoderLayer(TransformerEncoderLayer):
def forward(self, x, encoder_padding_mask):
seq_len, _, _ = x.size()
attn_mask = x.new_ones([seq_len, seq_len]).triu(1)
attn_mask = attn_mask.masked_fill(attn_mask.bool(), float('-inf'))
return super().forward(x, encoder_padding_mask, attn_mask)
class TransformerMonotonicDecoderLayer(TransformerDecoderLayer):
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
super().__init__(
args,
no_encoder_attn=True,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn
)
self.encoder_attn = build_monotonic_attention(args)
self.encoder_attn_layer_norm = LayerNorm(
self.embed_dim,
export=getattr(args, 'char_inputs', False)
)
def prune_incremental_state(self, incremental_state):
def prune(module):
input_buffer = module._get_input_buffer(incremental_state)
for key in ["prev_key", "prev_value"]:
if input_buffer[key].size(2) > 1:
input_buffer[key] = input_buffer[key][:, :, :-1, :]
else:
input_buffer = {}
break
module._set_input_buffer(incremental_state, input_buffer)
prune(self.self_attn)
def get_steps(self, incremental_state):
return (
self.encoder_attn
._get_monotonic_buffer(
incremental_state
).get("step", 0)
)
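# Added note (hedged): TransformerMonotonicEncoderLayer above builds a strictly
# upper-triangular mask filled with -inf, so source position i can only attend
# to positions <= i, i.e. the unidirectional encoder required for simultaneous
# translation. For seq_len = 4 the mask is
#   [[0., -inf, -inf, -inf],
#    [0.,   0., -inf, -inf],
#    [0.,   0.,   0., -inf],
#    [0.,   0.,   0.,   0.]]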
| 1,919 | 32.103448 | 92 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/simultaneous_translation/modules/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from fairseq import registry
(
build_monotonic_attention,
register_monotonic_attention,
MONOTONIC_ATTENTION_REGISTRY
) = registry.setup_registry('--simul-type')
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
model_name = file[:file.find('.py')]
importlib.import_module('examples.simultaneous_translation.modules.' + model_name)
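# Added sketch (hedged): every module placed in this directory is imported by
# the loop above, so a new attention variant only has to register itself and it
# becomes selectable via --simul-type. "my_variant" and the chosen base class
# below are illustrative placeholders, not part of the original code:
#
#   from . import register_monotonic_attention
#   from .monotonic_multihead_attention import MonotonicMultiheadAttentionHard
#
#   @register_monotonic_attention("my_variant")
#   class MyMonotonicAttention(MonotonicMultiheadAttentionHard):
#       pass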
| 625 | 30.3 | 90 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/simultaneous_translation/eval/evaluate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from client import SimulSTEvaluationService, SimulSTLocalEvaluationService
from fairseq.registry import REGISTRIES
from agents import build_agent
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 12321
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', type=str, default=DEFAULT_HOSTNAME,
help='server hostname')
parser.add_argument('--port', type=int, default=DEFAULT_PORT,
help='server port number')
parser.add_argument('--agent-type', default='simul_trans_text',
help='Agent type')
parser.add_argument('--scorer-type', default='text',
help='Scorer type')
parser.add_argument('--start-idx', type=int, default=0,
help='Start index of the sentence to evaluate')
parser.add_argument('--end-idx', type=int, default=float('inf'),
help='End index of the sentence to evaluate')
parser.add_argument('--scores', action="store_true",
help='Request scores from server')
parser.add_argument('--reset-server', action="store_true",
help='Reset the server')
parser.add_argument('--num-threads', type=int, default=10,
help='Number of threads used by agent')
parser.add_argument('--local', action="store_true", default=False,
help='Local evaluation')
args, _ = parser.parse_known_args()
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
if args.local:
session = SimulSTLocalEvaluationService(args)
else:
session = SimulSTEvaluationService(args.hostname, args.port)
if args.reset_server:
session.new_session()
if args.agent_type is not None:
agent = build_agent(args)
agent.decode(session, args.start_idx, args.end_idx, args.num_threads)
if args.scores:
        print(session.get_scores())
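# Added note (hedged): get_args() parses the command line twice on purpose. The
# first parse_known_args() only recovers registry choices such as --agent-type
# and --scorer-type; the selected classes then contribute their own options via
# add_args(), and the final parse_args() validates the complete command line.
# A hypothetical local run might look like
#
#   python evaluate.py --local --agent-type simul_trans_text --scorer-type text --scores
#
# (the script name and any agent/scorer-specific flags are assumptions here).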
| 2,494 | 34.140845 | 77 |
py
|
RegularizedBN
|
RegularizedBN-main/examples/simultaneous_translation/eval/server.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import json
from tornado import web, ioloop
from scorers import build_scorer
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 12321
class ScorerHandler(web.RequestHandler):
def initialize(self, scorer):
self.scorer = scorer
class EvalSessionHandler(ScorerHandler):
def post(self):
self.scorer.reset()
def get(self):
r = json.dumps(self.scorer.get_info())
self.write(r)
class ResultHandler(ScorerHandler):
def get(self):
r = json.dumps(self.scorer.score())
self.write(r)
class SourceHandler(ScorerHandler):
def get(self):
sent_id = int(self.get_argument('sent_id'))
segment_size = None
if "segment_size" in self.request.arguments:
string = self.get_argument('segment_size')
if len(string) > 0:
segment_size = int(string)
r = json.dumps(self.scorer.send_src(int(sent_id), segment_size))
self.write(r)
class HypothesisHandler(ScorerHandler):
def put(self):
sent_id = int(self.get_argument('sent_id'))
list_of_tokens = self.request.body.decode('utf-8').strip().split()
self.scorer.recv_hyp(sent_id, list_of_tokens)
def add_args():
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument('--hostname', type=str, default=DEFAULT_HOSTNAME,
help='Server hostname')
    parser.add_argument('--port', type=int, default=DEFAULT_PORT,
                        help='Server port number')
    parser.add_argument('--debug', action='store_true',
                        help='Run the Tornado app in debug mode '
                        '(consumed as args.debug by the __main__ block below)')
    args, _ = parser.parse_known_args()
# fmt: on
return args
def start_server(scorer, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, debug=False):
app = web.Application([
(r'/result', ResultHandler, dict(scorer=scorer)),
(r'/src', SourceHandler, dict(scorer=scorer)),
(r'/hypo', HypothesisHandler, dict(scorer=scorer)),
(r'/', EvalSessionHandler, dict(scorer=scorer)),
], debug=debug)
app.listen(port, max_buffer_size=1024 ** 3)
sys.stdout.write(f"Evaluation Server Started. Listening to port {port}\n")
ioloop.IOLoop.current().start()
if __name__ == '__main__':
args = add_args()
scorer = build_scorer(args)
start_server(scorer, args.hostname, args.port, args.debug)
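# Added usage sketch (hedged): once the server is listening, a client could
# exercise the handlers above roughly as follows (sent_id, the segment size and
# the hypothesis tokens are illustrative):
#
#   curl -X POST "http://localhost:12321/"                          # reset the session
#   curl "http://localhost:12321/"                                  # session info
#   curl "http://localhost:12321/src?sent_id=0&segment_size=10"     # next source segment
#   curl -X PUT --data "a partial hypothesis" "http://localhost:12321/hypo?sent_id=0"
#   curl "http://localhost:12321/result"                            # corpus-level scores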
| 2,458 | 27.929412 | 84 |
py
|