repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1 value)
---|---|---|---|---|---|---|
RegularizedBN
|
RegularizedBN-main/fairseq/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['pdb']
__version__ = '0.9.0'
import sys
# backwards compatibility to support `from fairseq.meters import AverageMeter`
from fairseq.logging import meters, metrics, progress_bar # noqa
sys.modules['fairseq.meters'] = meters
sys.modules['fairseq.metrics'] = metrics
sys.modules['fairseq.progress_bar'] = progress_bar
import fairseq.criterions # noqa
import fairseq.models # noqa
import fairseq.modules # noqa
import fairseq.optim # noqa
import fairseq.optim.lr_scheduler # noqa
import fairseq.pdb # noqa
import fairseq.scoring # noqa
import fairseq.tasks # noqa
import fairseq.token_generation_constraints # noqa
import fairseq.benchmark # noqa
import fairseq.model_parallel # noqa
| 885 | 29.551724 | 78 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/search.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional, List
import torch
import torch.nn as nn
from torch import Tensor
from fairseq.token_generation_constraints import ConstraintState, UnorderedConstraintState, OrderedConstraintState
class Search(nn.Module):
def __init__(self, tgt_dict):
super().__init__()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.src_lengths = torch.tensor(-1)
self.supports_constraints = False
def step(self, step, lprobs, scores):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
@torch.jit.export
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
"""Initialize constraint states for constrained decoding (if supported).
Args:
batch_constraints: (torch.Tensor, optional)
the list of constraints, in packed form
beam_size: (int)
the beam size
Returns:
nothing; subclasses that support constraints set up their internal constraint state here
"""
pass
def prune_sentences(self, batch_idxs: Tensor):
"""
Removes constraint states for completed sentences (if supported).
This is called from sequence_generator._generate() when sentences are
deleted from the batch.
Args:
batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
"""
pass
def update_constraints(self, active_hypos: Tensor):
"""
Updates the constraint states by selecting the beam items that are retained.
This is called at each time step of sequence_generator._generate() when
the set of 2 * {beam_size} candidate hypotheses is reduced to the beam size.
Args:
active_hypos: (batch size, beam size)
list of integers denoting, for each sentence, which beam candidate items
should be kept.
"""
pass
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.constraint_states = None
@torch.jit.export
def step(self, step: int, lprobs, scores: Optional[Tensor]):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# At this point, beams_buf and indices_buf are single-dim and contain relative indices
return scores_buf, indices_buf, beams_buf
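# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original fairseq API): how the flattened
# top-k indices above are projected back into (beam, token) pairs. The sizes
# and tensors below are invented purely for demonstration.
# ---------------------------------------------------------------------------
def _example_topk_projection():
    import torch

    bsz, beam_size, vocab_size = 2, 3, 10
    lprobs = torch.randn(bsz, beam_size, vocab_size).log_softmax(dim=-1)
    # flatten beams and vocab into one dimension, exactly as BeamSearch.step does
    top_scores, top_indices = torch.topk(lprobs.view(bsz, -1), k=2 * beam_size)
    # recover which beam each candidate came from, and which vocab token it is
    beams = top_indices // vocab_size          # values in [0, beam_size)
    tokens = top_indices.fmod(vocab_size)      # values in [0, vocab_size)
    assert beams.max() < beam_size and tokens.max() < vocab_size
    return top_scores, tokens, beams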
class LexicallyConstrainedBeamSearch(Search):
"""Implements lexically constrained beam search as described in
Fast Lexically Constrained Decoding with Dynamic Beam
Allocation for Neural Machine Translation. Post & Vilar,
NAACL 2018. https://www.aclweb.org/anthology/N18-1119/
and
Improved Lexically Constrained Decoding for Translation and
Monolingual Rewriting. Hu et al, NAACL
2019. https://www.aclweb.org/anthology/N19-1090/
This is accomplished by maintaining, for each beam hypothesis, a
ConstraintState object (see constraints.py) that tracks which
constraints have been generated and using this information to
shape the beam for each input sentence.
"""
def __init__(self, tgt_dict, representation):
super().__init__(tgt_dict)
self.representation = representation
self.vocab_size = len(tgt_dict)
self.num_cands = 0
self.supports_constraints = True
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
self.constraint_states = []
for constraint_tensor in batch_constraints:
if self.representation == "ordered":
constraint_state = OrderedConstraintState.create(constraint_tensor)
elif self.representation == "unordered":
constraint_state = UnorderedConstraintState.create(constraint_tensor)
self.constraint_states.append([constraint_state for i in range(beam_size)])
@torch.jit.export
def prune_sentences(self, batch_idxs: Tensor):
self.constraint_states = [self.constraint_states[i] for i in batch_idxs.tolist()]
@torch.jit.export
def update_constraints(self, active_hypos: Tensor):
if self.constraint_states:
batch_size = active_hypos.size(0)
for sentid in range(batch_size):
self.constraint_states[sentid] = [self.constraint_states[sentid][i] for i in active_hypos[sentid]]
@torch.jit.export
def step(self, step: int, lprobs: Tensor, scores: Optional[Tensor]):
"""
A constrained step builds a large candidates list from the following:
- the top 2 * {beam_size} items over the whole beam
- for each item in the beam
- the top {each_k} (default 1)
- all next constraints
We then compute the constrained state of each beam item, and assign
stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so
on. We then sort by (stripe, score), and truncate the list at
2 * beam size.
Args:
step: the decoder step
lprobs: (batch size, beam size, target vocab)
the target-vocab distributions for each item in the beam.
Return: A tuple of (scores, indices, beams, constraints) where:
scores: (batch, output beam size)
the scores of the chosen elements
indices: (batch, output beam size)
the target vocab indices of the chosen elements
beams: (batch, output beam size)
the 0-indexed hypothesis ids of the chosen elements
constraints: (batch, output beam size)
the new constraint states
"""
each_k = 1
device = lprobs.device
batch_size, beam_size, vocab_size = lprobs.size()
self.num_cands = min(
# Just take the k-best. We'll get another k from the 1-best from each
# row, plus more from the constraints
beam_size * 2,
lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad
)
# STEP 0: Preliminary. Prevent EOS for unfinished hyps across all batch items
constraint_states = self.constraint_states
if constraint_states and step > 0:
not_finished_indices = []
for sentno, sent_constraints in enumerate(constraint_states):
for beamno, state in enumerate(sent_constraints):
index = sentno * beam_size + beamno
if not state.finished:
not_finished_indices.append(index)
not_finished_indices = torch.tensor(not_finished_indices)
if not_finished_indices.numel() > 0:
lprobs.view(batch_size * beam_size, -1)[not_finished_indices, self.eos] = -math.inf
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam entry for each batch item
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(batch_size, -1),
self.num_cands,
)
scores_buf, indices_buf = top_prediction
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# Short circuit if there are no constraints in this batch
if not constraint_states:
return scores_buf, indices_buf, beams_buf
# STEP 1: get top-1 from each hypothesis across all sentences in the batch
if step > 0:
top_scores, top_indices = torch.topk(
lprobs.view(batch_size * beam_size, -1),
k=each_k,
dim=1,
)
top_scores = top_scores.view(batch_size, -1)
top_indices = top_indices.view(batch_size, -1)
scores_buf = torch.cat((scores_buf, top_scores), dim=1)
indices_buf = torch.cat((indices_buf, top_indices), dim=1)
new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1)
beams_buf = torch.cat((beams_buf, new_beams), dim=1)
# Now, process sentences in the batch one by one.
new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device)
new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
for sentno, states in enumerate(constraint_states):
scores, indices, beams, new_states = self.step_sentence(step,
sentno,
lprobs[sentno],
constraint_states[sentno],
beams_buf[sentno].clone(),
indices_buf[sentno].clone(),
scores_buf[sentno].clone())
new_scores_buf[sentno] = scores
new_indices_buf[sentno] = indices
new_beams_buf[sentno] = beams
self.constraint_states[sentno] = new_states
return new_scores_buf, new_indices_buf, new_beams_buf
@torch.jit.export
def step_sentence(self,
step: int,
sentno: int,
lprobs: Tensor,
constraint_states: List[List[ConstraintState]],
beams_buf: Tensor,
indices_buf: Tensor,
scores_buf: Tensor):
"""Does per-sentence processing. Adds all constraints for each
hypothesis to the list of candidates; then removes duplicates,
sorts, and dynamically stripes across the banks. All tensor inputs
are collapsed to those pertaining to a single input sentence.
"""
device = lprobs.device
# STEP 2: Add all constraints for each beam item
for beamno, state in enumerate(constraint_states):
next_tokens = torch.tensor(list(state.next_tokens()), device=device).long()
if next_tokens.numel() != 0:
indices_buf = torch.cat((indices_buf, next_tokens))
next_beams = torch.tensor(beamno, device=device).repeat(next_tokens.size(0)).long()
beams_buf = torch.cat((beams_buf, next_beams))
next_values = lprobs[beamno].take(next_tokens.view(-1))
scores_buf = torch.cat((scores_buf, next_values))
# At the 0th time step, there is just one beam item
if step == 0:
break
# STEP 3: Compute the "bank" for each candidate. This is the
# number of constraints it's generated. We need this so that
# we can do round-robin allocation of the beam across these
# banks. If C is the number of constraints, we select the best
# item in bank C, then the best in bank C-1, etc, followed by
# the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so
# on, until the maximum beam size. We accomplish this by
# creating a sort key and striping across the banks.
# Compute the new states for all candidates
cands_size = indices_buf.size(0)
constraint_states = [constraint_states[beams_buf[i]].advance(indices_buf[i])
for i in range(cands_size)]
banks = torch.tensor([state.bank for state in constraint_states], device=device)
# STEP 4: Sort
num_constraint_tokens = len(state.tokens)
# Sort by keys (bank, score) (i.e., sort banks together, and scores
# within banks). AFAIK pytorch doesn't support either stable sort or
# multi-key sorting, so we have to hack this.
MAX_SCORE = -100
sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf
sort_values, sort_indices = sort_key.sort(dim=0, descending=True)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
banks = banks[sort_indices]
# Sort the constraints to follow suit
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 5: Remove duplicates. The topk calls (overall and
# per-row) plus the per-row generation of constraints will
# produce duplicates. Here we remove them.
def roll(t):
"""Rolls a 1d tensor left by 1.
[0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3]
"""
return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0)
# We map candidates (beam, token_id) to a single dimension.
# This is then shifted by 1. We can then easily identify
# duplicates and create a mask that identifies unique
# extensions.
uniques_mask = (beams_buf * (self.vocab_size + 1) + indices_buf)
uniques_mask = roll(uniques_mask) != uniques_mask
# Use the mask to pare down the data structures
scores_buf = torch.masked_select(scores_buf, uniques_mask)
indices_buf = torch.masked_select(indices_buf, uniques_mask)
beams_buf = torch.masked_select(beams_buf, uniques_mask)
banks = torch.masked_select(banks, uniques_mask)
i = 1
for mask in uniques_mask[1:]:
if not mask:
constraint_states.pop(i)
i += mask
# STEP 6: Assign IDs round-robin across banks, sort, and
# truncate. Now that the candidates are sorted by (bank,
# score) and uniqed, we dynamically allocate the {beam_size}
# beam by striping across the candidates. These stripes will
# be used as sort keys to do round-robin selection. This is
# accomplished in a single pass with offsets. Sorting by
# highest-banks (furthest-along hypotheses) first ensures
# progress through the constraints.
#
# e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0
# OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1
# NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7
# = 0 5 10 1 6 11 13 2 7 12 3 8
#
# Sorting by this then gives the following banks:
#
# 3 2 1 0 3 2 1 0 3 2 1 2
#
# We'll take the top {beam_size} of these.
stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
stripes = torch.zeros_like(banks)
cur_bank_count = -1
cur_bank = banks[0]
for i, bank in enumerate(banks):
if bank != cur_bank:
cur_bank_count = 0
cur_bank = bank
else:
cur_bank_count += 1
stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
# STEP 7: Sort by the stripes values
sort_values, sort_indices = stripes.sort(dim=0)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 8: Truncate to the candidates size!
scores_buf = scores_buf[:self.num_cands]
indices_buf = indices_buf[:self.num_cands]
beams_buf = beams_buf[:self.num_cands]
return scores_buf, indices_buf, beams_buf, constraint_states
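# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the round-robin
# "striping" described in STEP 6 of step_sentence(), run on a toy bank
# assignment. The numbers are invented; the loop mirrors the one above.
# ---------------------------------------------------------------------------
def _example_bank_striping():
    import torch

    num_constraint_tokens = 3
    # candidates already sorted by (bank, score), highest bank first
    banks = torch.tensor([3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0])
    stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
    stripes = torch.zeros_like(banks)
    cur_bank_count = -1
    cur_bank = banks[0]
    for i, bank in enumerate(banks):
        if bank != cur_bank:
            cur_bank_count = 0
            cur_bank = bank
        else:
            cur_bank_count += 1
        stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
    # sorting by the stripe key visits the best item of each bank first,
    # then the second-best of each bank, and so on
    _, order = stripes.sort(dim=0)
    return banks[order]  # tensor([3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 2])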
class LengthConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
super().__init__(tgt_dict)
self.min_len_a = min_len_a
self.min_len_b = min_len_b
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.beam = BeamSearch(tgt_dict)
self.needs_src_lengths = True
def step(self, step: int, lprobs, scores):
min_lens = self.min_len_a * self.src_lengths + self.min_len_b
max_lens = self.max_len_a * self.src_lengths + self.max_len_b
lprobs[step < min_lens, :, self.eos] = -math.inf
lprobs[step >= max_lens, :, self.eos] = 0
return self.beam.step(step, lprobs, scores)
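# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): how the length limits
# above gate EOS. Sentences still shorter than their minimum length cannot
# emit EOS (log-prob set to -inf); sentences at or past their maximum length
# are forced to (log-prob set to 0, i.e. probability 1). All sizes and the
# min/max coefficients below are invented.
# ---------------------------------------------------------------------------
def _example_length_masking():
    import math
    import torch

    eos = 2
    bsz, beam_size, vocab_size = 3, 2, 8
    step = 5
    src_lengths = torch.tensor([3, 6, 12])
    lprobs = torch.randn(bsz, beam_size, vocab_size).log_softmax(dim=-1)
    min_lens = 0.5 * src_lengths + 1    # hypothetical min_len_a / min_len_b
    max_lens = 1.0 * src_lengths + 2    # hypothetical max_len_a / max_len_b
    lprobs[step < min_lens, :, eos] = -math.inf   # too short: forbid EOS
    lprobs[step >= max_lens, :, eos] = 0          # too long: force EOS
    return lprobs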
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.beam = BeamSearch(tgt_dict)
@torch.jit.export
def step(self, step: int, lprobs, scores):
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
"DiverseBeamSearch requires --beam to be divisible by the number of groups"
)
# initialize diversity penalty
diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g :: self.num_groups, :]
scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(
lprobs_g,
other=diversity_buf.unsqueeze(1),
alpha=self.diversity_strength,
)
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(
step, lprobs_g, scores_g
)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
diversity_buf.scatter_add_(
1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
)
# interleave results from different groups
scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1)
indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1)
beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1)
return scores_buf, indices_buf, beams_buf
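# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the Hamming diversity
# bookkeeping. Each group adds 1 to the count of every token it just selected;
# later groups are penalised in proportion to those counts. All sizes and the
# penalty strength are invented for the example.
# ---------------------------------------------------------------------------
def _example_diversity_penalty():
    import torch

    bsz, vocab_size = 2, 10
    diversity_strength = -0.5
    diversity_buf = torch.zeros(bsz, vocab_size)
    # tokens chosen by an earlier group (one row per sentence)
    indices_buf = torch.tensor([[1, 4, 4], [0, 2, 9]])
    diversity_buf.scatter_add_(
        1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
    )
    # a later group's log-probs get the accumulated counts added with a
    # negative weight, pushing it away from already-chosen tokens
    lprobs_g = torch.randn(bsz, 3, vocab_size).log_softmax(dim=-1)
    lprobs_g = torch.add(
        lprobs_g, other=diversity_buf.unsqueeze(1), alpha=diversity_strength
    )
    return lprobs_g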
class Sampling(Search):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
return trimed_probs, truncated_indices
@torch.jit.export
def step(self, step: int, lprobs, scores):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs, top_indices = self._sample_topp(lprobs)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs, top_indices = lprobs.topk(self.sampling_topk)
probs = lprobs.exp_()
else:
probs = lprobs.exp_()
# dummy data to be consistent with true branch for type check
top_indices = torch.empty(0).to(probs)
# sample
if step == 0:
indices_buf = torch.multinomial(
probs.view(bsz, -1), beam_size, replacement=True,
).view(bsz, beam_size)
else:
indices_buf = torch.multinomial(
probs.view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs = probs.expand(bsz, beam_size, -1)
# gather scores
scores_buf = torch.gather(
probs, dim=2, index=indices_buf.unsqueeze(-1)
)
scores_buf = scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=indices_buf.unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf.new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
# make scores cumulative
scores_buf.add_(
torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
)
return scores_buf, indices_buf, beams_buf
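# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the nucleus (top-P)
# truncation performed by _sample_topp, on a single made-up distribution.
# ---------------------------------------------------------------------------
def _example_top_p():
    import torch

    sampling_topp = 0.8
    probs = torch.tensor([[[0.05, 0.5, 0.1, 0.3, 0.05]]])  # (bsz=1, beam=1, vocab=5)
    sorted_probs, sorted_indices = probs.sort(descending=True)
    cumsum_probs = sorted_probs.cumsum(dim=2)      # 0.5, 0.8, 0.9, 0.95, 1.0
    mask = cumsum_probs.lt(sampling_topp)          # only the first entry is True
    # include one more word so the kept mass actually exceeds p
    last_included = mask.cumsum(dim=2)[:, :, -1:].clamp_(0, mask.size(2) - 1)
    mask = mask.scatter_(2, last_included, 1)
    max_dim = last_included.max()
    kept_probs = sorted_probs[:, :, : max_dim + 1].masked_fill_(~mask[:, :, : max_dim + 1], 0)
    kept_indices = sorted_indices[:, :, : max_dim + 1]
    return kept_probs, kept_indices                # probs 0.5 and 0.3, tokens 1 and 3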
class DiverseSiblingsSearch(Search):
"""
Beam search with diverse siblings.
See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details.
https://arxiv.org/abs/1611.08562
1/ Calculate hypotheses for each beam
2/ Intra-sibling ordering
3/ Rewrite scores
4/ Choose top K hypotheses
If diversity_rate == 0, this is equivalent to BeamSearch.
"""
def __init__(self, tgt_dict, diversity_rate):
super().__init__(tgt_dict)
self.diversity_rate = diversity_rate
self.beam = BeamSearch(tgt_dict)
def step(self, step: int, lprobs, scores):
bsz, beam_size, vocab_size = lprobs.size()
k = min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
)
s_list: List[Tensor]
i_list: List[Tensor]
s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)]
i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)]
sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate
if step == 0:
return self.beam.step(step, lprobs, scores)
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# 1/ Calculate hypotheses for each beam
for i in range(beam_size):
torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i]))
i_list[i].fmod_(vocab_size)
# 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores
s_list[i].sub_(sibling_score)
# 4/ Choose top K hypotheses
indices = torch.stack(i_list, dim=1).view(bsz, -1)
final_scores = torch.empty(0).to(lprobs)
final_indices = torch.LongTensor().to(device=lprobs.device)
final_beams = torch.LongTensor().to(device=lprobs.device)
(final_scores, final_indices) = torch.topk(
torch.stack(s_list, dim=1).view(bsz, -1),
k,
)
final_beams = final_indices // k
for i in range(bsz):
final_indices[i] = indices[i][final_indices[i]]
return final_scores, final_indices, final_beams
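# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the intra-sibling
# penalty. Within one beam, the k candidates returned by topk are already
# rank-ordered, so subtracting rank * diversity_rate demotes lower-ranked
# siblings before the global top-k is taken. The numbers are invented.
# ---------------------------------------------------------------------------
def _example_sibling_penalty():
    import torch

    diversity_rate = 0.3
    k = 4
    scores_one_beam = torch.tensor([[-1.0, -1.1, -1.2, -1.3]])  # (bsz=1, k)
    sibling_score = torch.arange(1, k + 1).to(scores_one_beam) * diversity_rate
    return scores_one_beam - sibling_score  # tensor([[-1.3, -1.7, -2.1, -2.5]])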
| 27,939 | 40.0279 | 114 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/quantization_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from fairseq.modules.quantization import pq, quantization_options, scalar
logger = logging.getLogger(__name__)
def quantize_model_scalar(model, args):
quant_noise_scalar = getattr(args, 'quant_noise_scalar', 0)
if quant_noise_scalar > 0:
# quantize_model edits the model in place
scalar.quantize_model_(model, p=quant_noise_scalar, bits=8, update_step=1000)
return model
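# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the function
# above is a no-op unless args carries a positive quant_noise_scalar. The
# Namespace and the tiny model below are invented for the example.
# ---------------------------------------------------------------------------
def _example_quantize_model_scalar():
    import argparse
    import torch.nn as nn

    model = nn.Linear(8, 8)
    args = argparse.Namespace(quant_noise_scalar=0.0)   # 0 -> model returned unchanged
    model = quantize_model_scalar(model, args)
    return model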
class Quantizer(object):
def __init__(self, config_path, max_epoch, max_update):
try:
import yaml
except ImportError:
raise ImportError('Please install yaml with: pip install pyyaml')
# parse config
if config_path:
with open(config_path) as config_file:
config = quantization_options.parse_config_yaml(
yaml.safe_load(config_file)
)
else:
config = quantization_options.parse_config_yaml({})
self.n_centroids_config = config["n_centroids"]
self.block_sizes_config = config["block_sizes"]
self.layers_to_quantize = config["layers_to_quantize"]
# We assume that training will run for a fixed number of epochs
# (or updates) and that we should train for equal durations
# between iterations of PQ.
num_iterations = len(self.layers_to_quantize)
if max_epoch > 0:
assert max_epoch % num_iterations == 0, (
'for iterative PQ, --max-epoch (={}) must be evenly divisible by '
'len(layers_to_quantize) (={})'.format(max_epoch, num_iterations)
)
self.epoch_schedule = max_epoch // num_iterations
else:
self.epoch_schedule = None
if max_update > 0:
assert max_update % num_iterations == 0, (
'for iterative PQ, --max-update (={}) must be evenly divisible by '
'len(layers_to_quantize) (={})'.format(max_update, num_iterations)
)
self.update_schedule = max_update // num_iterations
else:
self.update_schedule = None
assert (self.epoch_schedule is not None) ^ (self.update_schedule is not None), \
'for iterative PQ, exactly one of --max-epoch and --max-update must be set'
# 0 is a special value for quantization step, which will force
# the first call to begin_epoch() to call step()
self.quantization_step = 0
def set_trainer(self, trainer):
self.trainer = trainer
self.size_tracker = pq.SizeTracker(self.trainer.get_model())
def step(self):
"""Move to the next stage of quantization."""
if self.quantization_step >= len(self.layers_to_quantize):
# Maybe we just finished the last training step or we loaded
# a checkpoint for an iterative PQ model which previously
# finished training. Either way, don't quantize again.
return
logger.info(
'quantizing model (step={}; layers_to_quantize[step]={})'.format(
self.quantization_step, self.layers_to_quantize[self.quantization_step]
)
)
quantized_layers = pq.quantize_model_(
self.trainer.get_model(),
self.size_tracker,
self.layers_to_quantize,
self.block_sizes_config,
self.n_centroids_config,
step=self.quantization_step,
)
logger.info('quantized layers: {}'.format(quantized_layers))
logger.info(self.size_tracker)
self.quantization_step += 1
# reinitialize the Trainer since model parameters have changed
self.trainer.reinitialize()
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch (epochs start at 1)."""
if (
(
self.epoch_schedule is not None
and epoch > 0
and (epoch - 1) % self.epoch_schedule == 0
)
# we always step once in the beginning, even if using
# update-based quantization
or self.quantization_step == 0
):
self.step()
def step_update(self, num_updates):
"""Called at the end of each step."""
if (
self.update_schedule is not None
and num_updates > 0
and num_updates % self.update_schedule == 0
):
self.step()
def state_dict(self):
return {
'n_centroids_config': self.n_centroids_config,
'block_sizes_config': self.block_sizes_config,
'layers_to_quantize': self.layers_to_quantize,
'epoch_schedule': self.epoch_schedule,
'update_schedule': self.update_schedule,
'quantization_step': self.quantization_step,
}
def load_state_dict(self, state_dict):
self.n_centroids_config = state_dict['n_centroids_config']
self.block_sizes_config = state_dict['block_sizes_config']
self.layers_to_quantize = state_dict['layers_to_quantize']
self.epoch_schedule = state_dict['epoch_schedule']
self.update_schedule = state_dict['update_schedule']
self.quantization_step = state_dict['quantization_step']
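# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the iterative-PQ
# schedule arithmetic used in Quantizer.__init__ and begin_epoch. With the
# invented numbers below, training runs for 30 epochs over 3 layer groups,
# so a new group is quantized at epochs 1, 11 and 21.
# ---------------------------------------------------------------------------
def _example_pq_schedule():
    max_epoch = 30
    layers_to_quantize = ["layer_group_0", "layer_group_1", "layer_group_2"]
    num_iterations = len(layers_to_quantize)
    assert max_epoch % num_iterations == 0
    epoch_schedule = max_epoch // num_iterations        # 10 epochs per group
    step_epochs = [
        epoch for epoch in range(1, max_epoch + 1)
        if epoch > 0 and (epoch - 1) % epoch_schedule == 0
    ]
    return step_epochs                                  # [1, 11, 21]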
| 5,440 | 37.048951 | 88 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/nan_detector.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger = logging.getLogger(__name__)
class NanDetector:
"""
Detects the first NaN or Inf in the forward and/or backward pass and logs it, together with the name of the module that produced it
"""
def __init__(self, model, forward=True, backward=True):
self.bhooks = []
self.fhooks = []
self.forward = forward
self.backward = backward
self.reset()
for name, mod in model.named_modules():
mod.__module_name = name
self.add_hooks(mod)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
def add_hooks(self, module):
if self.forward:
self.fhooks.append(module.register_forward_hook(self.fhook_fn))
if self.backward:
self.bhooks.append(module.register_backward_hook(self.bhook_fn))
def reset(self):
self.has_printed_f = False
self.has_printed_b = False
def _detect(self, tensor, name, backward):
err = None
if (
torch.is_floating_point(tensor)
# single value tensors (like the loss) will not provide much info
and tensor.numel() >= 2
):
with torch.no_grad():
if torch.isnan(tensor).any():
err = "NaN"
elif torch.isinf(tensor).any():
err = "Inf"
if err is not None:
err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}"
return err
def _apply(self, module, inp, x, backward):
if torch.is_tensor(x):
if isinstance(inp, tuple) and len(inp) > 0:
inp = inp[0]
err = self._detect(x, module.__module_name, backward)
if err is not None:
if torch.is_tensor(inp) and not backward:
err += (
f" input max: {inp.max().item()}, input min: {inp.min().item()}"
)
has_printed_attr = 'has_printed_b' if backward else 'has_printed_f'
logger.warning(err)
setattr(self, has_printed_attr, True)
elif isinstance(x, dict):
for v in x.values():
self._apply(module, inp, v, backward)
elif isinstance(x, list) or isinstance(x, tuple):
for v in x:
self._apply(module, inp, v, backward)
def fhook_fn(self, module, inp, output):
if not self.has_printed_f:
self._apply(module, inp, output, backward=False)
def bhook_fn(self, module, inp, output):
if not self.has_printed_b:
self._apply(module, inp, output, backward=True)
def close(self):
for hook in self.fhooks + self.bhooks:
hook.remove()
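# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): NanDetector is
# a context manager, so hooks are installed on entry and removed on exit. The
# model and input below are invented for the example.
# ---------------------------------------------------------------------------
def _example_nan_detector():
    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    x = torch.randn(2, 4)
    with NanDetector(model):
        y = model(x)            # any NaN/Inf in module outputs is logged here
        y.sum().backward()      # backward hooks check gradients as well
    return y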
| 3,041 | 32.065217 | 119 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/iterative_refinement_generator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import torch
import numpy as np
from fairseq import utils
DecoderOut = namedtuple('IterativeRefinementDecoderOut', [
'output_tokens',
'output_scores',
'attn',
'step',
'max_step',
'history'
])
class IterativeRefinementGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
beam_size=1,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
reranking=False,
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
eos_penalty: if > 0.0, it penalizes early stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
retain_dropout: retain dropout during inference
adaptive: decoding with early stop
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.beam_size = beam_size
self.reranking = reranking
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None, constraints=None):
if constraints is not None:
raise NotImplementedError("Constrained decoding with the IterativeRefinementGenerator is not supported")
# TODO: iterative refinement generator does not support ensemble for now.
if not self.retain_dropout:
for model in models:
model.eval()
model, reranker = models[0], None
if self.reranking:
assert len(models) > 1, "Assuming the last checkpoint is the reranker"
assert self.beam_size > 1, "Reranking requires multiple translations for each example"
reranker = models[-1]
models = models[:-1]
if len(models) > 1 and hasattr(model, 'enable_ensemble'):
assert model.allow_ensemble, "{} does not support ensembling".format(model.__class__.__name__)
model.enable_ensemble(models)
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()
# initialize
encoder_out = model.forward_encoder([src_tokens, src_lengths])
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
if self.beam_size > 1:
assert model.allow_length_beam, \
"{} does not support decoding with length beam.".format(model.__class__.__name__)
# regenerate data based on length-beam
length_beam_order = utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, length_beam_order)
prev_decoder_out = model.regenerate_length_beam(prev_decoder_out, self.beam_size)
bsz = bsz * self.beam_size
sent_idxs = torch.arange(bsz)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(decoder_out.output_tokens.size(0)).bool()
if step == self.max_iter: # reach last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]['history'] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]['history'].append(
finalized_hypos(
step,
finalized_history_tokens[j][i],
None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated]
if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
else None,
history=[h[not_terminated] for h in decoder_out.history]
if decoder_out.history is not None
else None,
)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, not_terminated.nonzero(as_tuple=False).squeeze())
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.beam_size > 1:
if reranker is not None:
finalized = self.rerank(
reranker, finalized, [src_tokens, src_lengths], self.beam_size
)
# aggregate information from length beam
finalized = [
finalized[np.argmax(
[finalized[self.beam_size * i + j][0]['score'] for j in range(self.beam_size)]
) + self.beam_size * i] for i in range(len(finalized) // self.beam_size)
]
return finalized
def rerank(self, reranker, finalized, encoder_input, beam_size):
def rebuild_batch(finalized):
finalized_tokens = [f[0]['tokens'] for f in finalized]
finalized_maxlen = max(f.size(0) for f in finalized_tokens)
final_output_tokens = finalized_tokens[0].new_zeros(len(finalized_tokens), finalized_maxlen).fill_(self.pad)
for i, f in enumerate(finalized_tokens):
final_output_tokens[i, :f.size(0)] = f
return final_output_tokens
final_output_tokens = rebuild_batch(finalized)
final_output_tokens[:, 0] = self.eos # autoregressive model assumes starting with EOS
reranker_encoder_out = reranker.encoder(*encoder_input)
length_beam_order = utils.new_arange(
final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)).t().reshape(-1)
reranker_encoder_out = reranker.encoder.reorder_encoder_out(reranker_encoder_out, length_beam_order)
reranking_scores = reranker.get_normalized_probs(
reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), True, None)
reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
reranking_scores = reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(reranking_scores)
for i in range(len(finalized)):
finalized[i][0]['score'] = reranking_scores[i]
return finalized
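# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the length-beam
# aggregation at the end of generate(). Hypotheses are laid out beam-major
# per sentence, and the highest-scoring one is kept. Scores are invented.
# ---------------------------------------------------------------------------
def _example_length_beam_aggregation():
    import numpy as np

    beam_size = 2
    # two sentences, beam_size hypotheses each, flattened as in generate()
    finalized = [
        [{"score": 0.1}], [{"score": 0.4}],   # sentence 0
        [{"score": 0.9}], [{"score": 0.2}],   # sentence 1
    ]
    best = [
        finalized[np.argmax(
            [finalized[beam_size * i + j][0]["score"] for j in range(beam_size)]
        ) + beam_size * i]
        for i in range(len(finalized) // beam_size)
    ]
    return best    # [[{'score': 0.4}], [{'score': 0.9}]]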
| 12,517 | 38.36478 | 122 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
from itertools import chain
import logging
import sys
import time
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, distributed_utils, models, optim, utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
logger = logging.getLogger(__name__)
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, args, task, model, criterion, quantizer=None):
self.args = args
self.task = task
# catalog shared parameters
shared_params = _catalog_shared_params(model)
self.tpu = getattr(args, 'tpu', False)
self.cuda = torch.cuda.is_available() and not args.cpu and not self.tpu
if self.cuda:
self.device = torch.device('cuda')
elif self.tpu:
self.device = utils.get_tpu_device(args)
else:
self.device = torch.device('cpu')
# copy model and criterion to current device/dtype
self._criterion = criterion
self._model = model
if self.tpu:
import torch_xla.core.xla_model as xm
self._model = xm.send_cpu_data_to_device(self._model, self.device)
if args.fp16:
self._criterion = self._criterion.half()
self._model = self._model.half()
elif args.bf16:
self._criterion = self._criterion.to(dtype=torch.bfloat16)
self._model = self._model.to(dtype=torch.bfloat16)
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
# check that shared parameters are preserved after device transfer
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info(
'detected shared parameter: {} <- {}'.format(shared_param[0], path)
)
_set_module_by_path(self._model, path, ref)
self._dummy_batch = "DUMMY" # indicates we don't have a dummy batch at first
self._lr_scheduler = None
self._num_updates = 0
self._num_xla_compiles = 0 # for TPUs
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
# print self.data_parallel_world_size to check whether it corresponds to the number of GPUs
# TODO(myleott): support tpu
if self.cuda and self.data_parallel_world_size > 1:
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
self.quantizer = quantizer
if self.quantizer is not None:
self.quantizer.set_trainer(self)
# get detailed cuda environment
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if self.data_parallel_world_size > 1:
self.cuda_env_arr = distributed_utils.all_gather_list(self.cuda_env)
else:
self.cuda_env_arr = [self.cuda_env]
if self.data_parallel_rank == 0:
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time("wall", priority=790, round=0)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
"""Reinitialize the Trainer, typically after model params change."""
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
@property
def data_parallel_world_size(self):
return self.args.distributed_world_size
@property
def data_parallel_process_group(self):
if self.tpu:
return ('tpu', None)
else:
return None
@property
def data_parallel_rank(self):
return self.args.distributed_rank
@property
def is_data_parallel_master(self):
return distributed_utils.is_master(self.args)
@property
def criterion(self):
if self._wrapped_criterion is None:
if (
utils.has_parameters(self._criterion)
and self.data_parallel_world_size > 1
and not self.args.use_bmuf
and not self.tpu
):
self._wrapped_criterion = models.DistributedFairseqModel(
self.args, self._criterion,
process_group=self.data_parallel_process_group
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if self._wrapped_model is None:
if (
self.data_parallel_world_size > 1
and not self.args.use_bmuf
and not self.tpu
):
self._wrapped_model = models.DistributedFairseqModel(
self.args, self._model,
process_group=self.data_parallel_process_group
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
#print(type(self.model.parameters()),type(self.criterion.parameters()));
#<class 'generator'> <class 'generator'>
#for name, para in self.model.named_parameters():
# print(para); # same elements as model.parameters(), so we can filter by name and collect the selected parameters into a list
if 1 or self.args.setting_encoder_lr==0:
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
else:
#for name, para in self.model.named_parameters():
# print(name)
#exit() # parameters containing the encoder norm are not decayed
param1 = []
param2 = []
'''
for name, para in self.model.named_parameters():
if para.requires_grad:
if name.find('WNScale')<0 and name.find('weight_g')<0:
param1.append(para)
else:
print(name)
if self.args.cwn_adjust or self.args.wn_adjust:
print("adjust")
param2.append(para)
'''
for name, para in self.model.named_parameters():
if para.requires_grad:
if name.find('decoder')>=0:
param1.append(para)
else:
print(name)
param2.append(para)
p1 = {'params':param1,'lr':0.0015}
p2 = {'params':param2}
params = [p1,p2]
#print('before')
#for x in self.criterion.parameters(): # empty
# print(x)
#print('after')
#print(params[-1]);
if self.args.fp16 or self.args.bf16: #false
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
logger.info(
"NOTE: your device does NOT support faster training with --fp16, "
"please switch to FP32 which is likely to be faster"
)
if self.args.memory_efficient_fp16 or self.args.memory_efficient_bf16:
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.args, params
)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: #false
logger.info("NOTE: your device may support faster training with --fp16")
self._optimizer = optim.build_optimizer(self.args, params) #here!
if self.args.use_bmuf: #false
self._optimizer = optim.FairseqBMUF(self.args, self._optimizer)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)
self._lr_scheduler.step_update(0)
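# ---------------------------------------------------------------------------
# Illustrative sketch in comment form (not part of the original Trainer): the
# two-group optimizer setup that the disabled branch above would build. The
# 'decoder' filter and the 0.0015 learning rate come from that branch; the
# rest is a hypothetical condensation, kept as comments because it sits
# inside the class body.
#
#   param1, param2 = [], []
#   for name, para in self.model.named_parameters():
#       if not para.requires_grad:
#           continue
#       (param1 if 'decoder' in name else param2).append(para)
#   params = [{'params': param1, 'lr': 0.0015}, {'params': param2}]
#   self._optimizer = optim.build_optimizer(self.args, params)
# ---------------------------------------------------------------------------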
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
if self.is_data_parallel_master: # only save one checkpoint
extra_state["metrics"] = metrics.state_dict()
extra_state["previous_training_time"] = self.cumulative_training_time()
checkpoint_utils.save_state(
filename,
self.args,
self.get_model().state_dict(),
self.get_criterion(),
self.optimizer,
self.lr_scheduler,
self.get_num_updates(),
self._optim_history,
extra_state,
)
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
"""Load all training state from a checkpoint file."""
extra_state, self._optim_history, last_optim_state = None, [], None
bexists = PathManager.isfile(filename)
if bexists:
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
# load model parameters
try:
self.get_model().load_state_dict(
state["model"], strict=True, args=self.args
)
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(
state["criterion"], strict=True
)
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = state["extra_state"]
self._optim_history = state["optimizer_history"]
last_optim_state = state.get("last_optimizer_state", None)
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), "Criterion does not match; please reset the optimizer (--reset-optimizer)."
assert (
last_optim["optimizer_name"] == self.optimizer.__class__.__name__
), "Optimizer does not match; please reset the optimizer (--reset-optimizer)."
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
epoch = extra_state["train_iterator"]["epoch"]
logger.info(
"loaded checkpoint {} (epoch {} @ {} updates)".format(
filename, epoch, self.get_num_updates()
)
)
if "previous_training_time" in extra_state:
self._previous_training_time = extra_state["previous_training_time"]
self._start_time = time.time()
self.lr_step(epoch)
if "metrics" in extra_state and not reset_meters:
metrics.load_state_dict(extra_state["metrics"])
# reset TimeMeters, since their start times don't make sense anymore
for meter in metrics.get_meters("default"):
if isinstance(meter, meters.TimeMeter):
meter.reset()
else:
logger.info("no existing checkpoint found {}".format(filename))
return extra_state
def get_train_iterator( # builds the training data iterator; controlled by --train_subset='train'
self,
epoch,
combine=True,
load_dataset=True,
data_selector=None,
shard_batch_itr=True,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
#epoch=1,2,3,....
#combine=True
# first epoch: load_dataset=True; later epochs: False
#data_selector=None
#shard_batch_itr=True
if load_dataset:
logger.info("loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.args.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
)
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.args.train_subset),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
self.args.max_tokens,
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
shard_id=self.data_parallel_rank if shard_batch_itr else 0,
num_workers=self.args.num_workers,
epoch=epoch,
)
def get_valid_iterator( # builds the validation data iterator; controlled by --valid_subset='valid'
self,
subset,
):
"""Return an EpochBatchIterator over given validation subset for a given epoch."""
return self.task.get_batch_iterator(
dataset=self.task.dataset(subset),
max_tokens=self.args.max_tokens_valid,
max_sentences=self.args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
),
ignore_invalid_inputs=self.args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.args.num_workers,
)
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous('begin_epoch') # wait for all workers
xm.mark_step()
@metrics.aggregate("train")
def train_step(self, samples, raise_oom=False):
"""Do forward, backward and parameter update."""
if self._dummy_batch == "DUMMY":
self._dummy_batch = samples[0]
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time("train_wall", priority=800, round=0)
# forward and backward pass
logging_outputs, sample_size, ooms = [], 0, 0
# print the length of samples to check whether it equals update_freq
#print(len(samples)); # it is update_freq
for i, sample in enumerate(samples): #samples = [sample], iterates only once; sample: tokens=3400
sample = self._prepare_sample(sample) # move the data onto the GPU
if sample is None: #false
# when sample is None, run forward/backward on a dummy batch
# and ignore the resulting gradients
sample = self._prepare_sample(self._dummy_batch)
is_dummy_batch = True
else:
is_dummy_batch = False
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.data_parallel_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
# the task's train_step is invoked here
# this call performs the forward pass and the backward gradient computation
loss, sample_size_i, logging_output = self.task.train_step(
sample=sample,
model=self.model,
criterion=self.criterion,
optimizer=self.optimizer,
update_num=self.get_num_updates(),
ignore_grad=is_dummy_batch,
)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
# emptying the CUDA cache after the first step can
# reduce the chance of OOM
if self.cuda and self.get_num_updates() == 0:
torch.cuda.empty_cache()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
logger.warning(
"attempting to recover from OOM in forward/backward pass"
)
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if self.args.distributed_world_size == 1:
return None
else:
raise e
if self.tpu and i < len(samples) - 1: #false
# tpu-comment: every XLA operation before marking step is
# appended to the IR graph, and processing too many batches
# before marking step can lead to OOM errors.
# To handle gradient accumulation use case, we explicitly
# mark step here for every forward pass without a backward pass
import torch_xla.core.xla_model as xm
xm.mark_step()
if is_dummy_batch: #false
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
# gather logging outputs from all replicas
#print(self._sync_stats());exit()
if self._sync_stats(): #false; only needed for multi-GPU data-parallel training
train_time = self._local_cumulative_training_time()
logging_outputs, (sample_size, ooms, total_train_time) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch,
)
self._cumulative_training_time = total_train_time / self.data_parallel_world_size
if hasattr(self.model, 'all_reduce'):
self.model.all_reduce()
overflow = False
try:
if self.tpu and self.data_parallel_world_size > 1: #false
import torch_xla.core.xla_model as xm
gradients = xm._fetch_gradients(self.optimizer.optimizer)
xm.all_reduce('sum', gradients, scale=1.0 / self.data_parallel_world_size)
with torch.autograd.profiler.record_function("multiply-grads"):
# multiply gradients by (# GPUs / sample_size) since DDP
# already normalizes by the number of GPUs. Thus we get
# (sum_of_gradients / sample_size).
if not self.args.use_bmuf: #true
self.optimizer.multiply_grads(self.data_parallel_world_size / sample_size)
elif sample_size > 0: # BMUF needs to check sample size
num = self.data_parallel_world_size if self._sync_stats() else 1
self.optimizer.multiply_grads(num / sample_size)
with torch.autograd.profiler.record_function("clip-grads"):
# clip grads
# executed: clip the gradients computed in the backward pass
grad_norm = self.clip_grad_norm(self.args.clip_norm)
# check that grad norms are consistent across workers
if (
not self.args.use_bmuf
and self.args.distributed_wrapper != 'SlowMo'
and not self.tpu
):
self._check_grad_norms(grad_norm) # executed
with torch.autograd.profiler.record_function("optimizer"):
# take an optimization step
self.optimizer.step() # apply the gradient update to the parameters
except FloatingPointError:
# re-run the forward and backward pass with hooks attached to print
# out where it fails
with NanDetector(self.model):
self.task.train_step(
sample, self.model, self.criterion, self.optimizer, self.get_num_updates(),
ignore_grad=False
)
raise
except OverflowError as e:
overflow = True
logger.info("NOTE: overflow detected, " + str(e))
grad_norm = torch.tensor(0.).cuda()
self.zero_grad()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
logger.error("OOM during optimization, irrecoverable")
raise e
# Some distributed wrappers (e.g., SlowMo) need access to the optimizer after the step
if hasattr(self.model, 'perform_additional_optimizer_actions'): #false
if hasattr(self.optimizer, 'fp32_params'):
self.model.perform_additional_optimizer_actions(self.optimizer.optimizer, self.optimizer.fp32_params)
else:
self.model.perform_additional_optimizer_actions(self.optimizer.optimizer)
if not overflow or self.args.distributed_wrapper == 'SlowMo': #true
self.set_num_updates(self.get_num_updates() + 1)
if self.tpu: #false
# mark step on TPUs
import torch_xla.core.xla_model as xm
xm.mark_step()
# only log stats every log_interval steps
# this causes wps to be misreported when log_interval > 1
logging_output = {}
if self.get_num_updates() % self.args.log_interval == 0:
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm,
)
# log whenever there's an XLA compilation, since these
# slow down training and may indicate opportunities for
# optimization
self._check_xla_compilation()
else: # executed
# log stats
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm,
)
# clear CUDA cache to reduce memory fragmentation
if (
self.cuda
and self.args.empty_cache_freq > 0
and (
(self.get_num_updates() + self.args.empty_cache_freq - 1)
% self.args.empty_cache_freq
) == 0
):
torch.cuda.empty_cache()
if self.args.fp16: #false
metrics.log_scalar(
"loss_scale",
self.optimizer.scaler.loss_scale,
priority=700,
round=4,
weight=0,
)
metrics.log_stop_time("train_wall")
return logging_output
@metrics.aggregate("valid")
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous('valid_step') # wait for all workers
xm.mark_step()
with torch.no_grad():
self.model.eval()
self.criterion.eval()
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch)
is_dummy_batch = True
else:
is_dummy_batch = False
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
logger.warning(
"ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.
# gather logging outputs from all replicas
if self.data_parallel_world_size > 1:
logging_outputs, (sample_size, ) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ignore=is_dummy_batch,
)
# log validation stats
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_output
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate at the end of the epoch."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
metrics.log_scalar("lr", new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""[deprecated] Get a specific meter by name."""
from fairseq import meters
if 'get_meter' not in self._warn_once:
self._warn_once.add('get_meter')
utils.deprecation_warning(
'Trainer.get_meter is deprecated. Please use fairseq.metrics instead.'
)
train_meters = metrics.get_meters("train")
if train_meters is None:
train_meters = {}
if name == "train_loss" and "loss" in train_meters:
return train_meters["loss"]
elif name == "train_nll_loss":
# support for legacy train.py, which assumed this meter is
# always initialized
m = train_meters.get("nll_loss", None)
return m or meters.AverageMeter()
elif name == "wall":
# support for legacy train.py, which assumed this meter is
# always initialized
m = metrics.get_meter("default", "wall")
return m or meters.TimeMeter()
elif name == "wps":
m = metrics.get_meter("train", "wps")
return m or meters.TimeMeter()
elif name in {"valid_loss", "valid_nll_loss"}:
# support for legacy train.py, which assumed these meters
# are always initialized
k = name[len("valid_"):]
m = metrics.get_meter("valid", k)
return m or meters.AverageMeter()
elif name == "oom":
return meters.AverageMeter()
elif name in train_meters:
return train_meters[name]
return None
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
if self.quantizer:
self.quantizer.step_update(self._num_updates)
metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
def clip_grad_norm(self, clip_norm):
return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=None)
def cumulative_training_time(self):
if self._cumulative_training_time is None:
# single GPU
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
"""Aggregate training time in seconds."""
return time.time() - self._start_time + self._previous_training_time
def _prepare_sample(self, sample):
if sample == "DUMMY":
raise Exception(
"Trying to use an uninitialized 'dummy' batch. This usually indicates "
"that the total number of batches is smaller than the number of "
"participating GPUs. Try reducing the batch size or using fewer GPUs."
)
if sample is None or len(sample) == 0:
return None
if self.cuda:
sample = utils.move_to_cuda(sample)
def apply_half(t):
if t.dtype is torch.float32:
return t.half()
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if self.args.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if self.args.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
utils.set_torch_seed(seed)
def _sync_stats(self):
# Return True if it's using multiple GPUs and DDP or multiple GPUs with
# BMUF and it's a bmuf sync with warmup iterations completed before.
if self.data_parallel_world_size == 1:
return False
elif self.args.use_bmuf:
return (
(self.get_num_updates() + 1) % self.args.global_sync_iter == 0
and (self.get_num_updates() + 1) > self.args.warmup_iterations
)
else:
return True
def _log_oom(self, exc):
msg = "OOM: Ran out of memory with exception: {}".format(exc)
logger.warning(msg)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
sys.stderr.flush()
def _aggregate_logging_outputs(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
return self._fast_stat_sync_sum(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
else:
return self._all_gather_list_sync(
logging_outputs, *extra_stats_to_sum, ignore=ignore
)
def _all_gather_list_sync(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. all_gather_list_sync is
suitable when logging outputs are complex types.
"""
if self.tpu:
raise NotImplementedError
if ignore:
logging_outputs = []
results = list(zip(
*distributed_utils.all_gather_list(
[logging_outputs] + list(extra_stats_to_sum),
max_size=getattr(self.args, 'all_gather_list_size', 16384),
group=self.data_parallel_process_group,
)
))
logging_outputs, extra_stats_to_sum = results[0], results[1:]
logging_outputs = list(chain.from_iterable(logging_outputs))
extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
return logging_outputs, extra_stats_to_sum
def _fast_stat_sync_sum(
self,
logging_outputs: List[Dict[str, Any]],
*extra_stats_to_sum,
ignore=False,
):
"""
Sync logging outputs across workers. fast_stat_sync_sum is
faster than all_gather_list_sync, but is only suitable when
logging outputs are scalars and can be summed. Note that
*logging_outputs* cannot contain any nested dicts/lists.
"""
data = {}
for i, stat in enumerate(extra_stats_to_sum):
data['extra_stats_' + str(i)] = stat
if len(logging_outputs) > 0:
log_keys = list(logging_outputs[0].keys())
for k in log_keys:
if not ignore:
v = sum(log[k] for log in logging_outputs if k in log)
else:
v = logging_outputs[0][k]
v = torch.zeros_like(v) if torch.is_tensor(v) else 0
data['logging_outputs_' + k] = v
else:
log_keys = None
data = distributed_utils.all_reduce_dict(
data,
device=self.device,
group=self.data_parallel_process_group
)
extra_stats_to_sum = [
data['extra_stats_' + str(i)] for i in range(len(extra_stats_to_sum))
]
if log_keys is not None:
logging_outputs = [{k: data['logging_outputs_' + k] for k in log_keys}]
else:
logging_outputs = []
return logging_outputs, extra_stats_to_sum
def _check_grad_norms(self, grad_norm):
"""Check that grad norms are consistent across workers."""
if self._grad_norm_buf is not None:
self._grad_norm_buf.zero_()
self._grad_norm_buf[self.data_parallel_rank] = grad_norm
distributed_utils.all_reduce(
self._grad_norm_buf,
group=self.data_parallel_process_group
)
def is_consistent(tensor):
max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))
return (
not torch.isfinite(tensor).any()
or (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()
)
if not is_consistent(self._grad_norm_buf):
pretty_detail = "\n".join(
"rank {:3d} = {:.8f}".format(r, n)
for r, n in enumerate(self._grad_norm_buf.tolist())
)
error_detail = "grad_norm across the workers:\n{}\n".format(pretty_detail)
raise RuntimeError(
"Fatal error: gradients are inconsistent between workers. "
"Try --ddp-backend=no_c10d. "
"Or are you mixing up different generation of GPUs in training?"
+ "\n"
+ "-" * 80
+ "\n{}\n".format(error_detail)
+ "-" * 80
)
def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
if grad_norm is not None:
metrics.log_speed("ups", 1., priority=100, round=2)
metrics.log_scalar("gnorm", grad_norm, priority=400, round=3)
if self.args.clip_norm > 0:
metrics.log_scalar(
"clip",
torch.where(
grad_norm > self.args.clip_norm,
grad_norm.new_tensor(100),
grad_norm.new_tensor(0),
),
priority=500,
round=1,
)
with metrics.aggregate() as agg:
if logging_outputs is not None:
self.task.reduce_metrics(logging_outputs, self.get_criterion())
del logging_outputs
# extra warning for criterions that don't properly log a loss value
if "loss" not in agg:
if "loss" not in self._warn_once:
self._warn_once.add("loss")
logger.warning(
"Criterion.reduce_metrics did not log a 'loss' value, "
"which may break some functionality"
)
metrics.log_scalar("loss", -1)
# support legacy interface
if self.tpu:
logging_output = {}
else:
logging_output = agg.get_smoothed_values()
logging_output["sample_size"] = sample_size
for key_to_delete in ["ppl", "wps", "wpb", "bsz"]:
if key_to_delete in logging_output:
del logging_output[key_to_delete]
return logging_output
def _check_xla_compilation(self, message=None):
import torch_xla.debug.metrics as met
compile_stats = met.metric_data("CompileTime")
if compile_stats is None:
return
num_xla_compiles = compile_stats[0]
if num_xla_compiles > self._num_xla_compiles:
if message is None:
message = (
"too many of these can lead to slow training, "
"but we expect a few in the beginning"
)
logging.info("NOTE: XLA compilation detected; {}".format(message))
self._num_xla_compiles = num_xla_compiles
def _catalog_shared_params(module, memo=None, prefix=''):
if memo is None:
first_call = True
memo = {}
else:
first_call = False
for name, param in module._parameters.items():
param_prefix = prefix + ('.' if prefix else '') + name
if param not in memo:
memo[param] = []
memo[param].append(param_prefix)
for name, m in module._modules.items():
if m is None:
continue
submodule_prefix = prefix + ('.' if prefix else '') + name
_catalog_shared_params(m, memo, submodule_prefix)
if first_call:
return [x for x in memo.values() if len(x) > 1]
def _get_module_by_path(module, path):
path = path.split('.')
for name in path:
module = getattr(module, name)
return module
def _set_module_by_path(module, path, value):
path = path.split('.')
for name in path[:-1]:
module = getattr(module, name)
setattr(module, path[-1], value)
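# Illustrative sketch (not part of the original trainer): how the three helpers above can
# be used to find shared parameters and re-tie them after one of the copies has been
# replaced (e.g. when loading a state dict). The ToyTied module below is made up.
def _shared_params_example():
    import torch.nn as nn
    class ToyTied(nn.Module):
        def __init__(self):
            super().__init__()
            self.emb = nn.Linear(4, 4, bias=False)
            self.out = nn.Linear(4, 4, bias=False)
            self.out.weight = self.emb.weight  # tie the two weights
    m = ToyTied()
    shared = _catalog_shared_params(m)          # -> [['emb.weight', 'out.weight']]
    ref = _get_module_by_path(m, shared[0][0])  # the shared Parameter itself
    for path in shared[0][1:]:
        _set_module_by_path(m, path, ref)       # re-point every alias at the same tensor
    return shared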
| 41,387 | 37.608209 | 117 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/transformer_sentence_encoder_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import (
LayerNorm,
MultiheadAttention,
)
from fairseq.modules.quant_noise import quant_noise
from fairseq.modules.fairseq_dropout import FairseqDropout
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = 'relu',
export: bool = False,
q_noise: float = 0.0,
qn_block_size: int = 8,
init_fn: Callable = None,
) -> None:
super().__init__()
if init_fn is not None:
init_fn()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
self.activation_dropout_module = FairseqDropout(activation_dropout, module_name=self.__class__.__name__)
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = self.build_self_attention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.fc1 = self.build_fc1(
self.embedding_dim,
ffn_embedding_dim,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
self.fc2 = self.build_fc2(
ffn_embedding_dim,
self.embedding_dim,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), q_noise, qn_block_size
)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), q_noise, qn_block_size
)
def build_self_attention(
self,
embed_dim,
num_attention_heads,
dropout,
self_attention,
q_noise,
qn_block_size,
):
return MultiheadAttention(
embed_dim,
num_attention_heads,
dropout=dropout,
self_attention=True,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
def forward(
self,
x: torch.Tensor,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer implementation.
"""
residual = x
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
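# Minimal usage sketch (illustrative, not part of the original module): the layer expects
# inputs of shape (seq_len, batch, embedding_dim) and applies post-norm residual blocks
# for self-attention and the feed-forward network. Toy sizes below are made up.
def _encoder_layer_example():
    layer = TransformerSentenceEncoderLayer(
        embedding_dim=64, ffn_embedding_dim=128, num_attention_heads=4
    )
    x = torch.rand(10, 2, 64)   # (seq_len=10, batch=2, dim=64)
    out, _ = layer(x)           # second value is the (unused) attention output
    return out.shape            # torch.Size([10, 2, 64])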
| 4,160 | 28.721429 | 112 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/learned_positional_embedding.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = torch.zeros(
(1, 1), device=input.device, dtype=input.dtype
).fill_(int(self.padding_idx + input.size(1)))
else:
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
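# Minimal usage sketch (illustrative, not part of the original module): non-pad tokens are
# assigned positions starting at padding_idx + 1, while padded entries keep padding_idx.
def _learned_positions_example():
    emb = LearnedPositionalEmbedding(num_embeddings=10, embedding_dim=4, padding_idx=1)
    tokens = torch.tensor([[5, 6, 7, 1, 1]])  # 1 marks padding
    out = emb(tokens)                         # real tokens get positions 2, 3, 4
    return out.shape                          # torch.Size([1, 5, 4])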
| 2,259 | 35.451613 | 94 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/sparse_multihead_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from .multihead_attention import MultiheadAttention
class SparseMultiheadAttention(MultiheadAttention):
""" Sparse Multi-Headed Attention.
"Generating Long Sequences with Sparse Transformers". Implements
fixed factorized self attention, where l=stride and c=expressivity.
A(1) includes all words in the stride window and A(2) takes a summary of c
words from the end of each stride window.
If is_bidirectional=False, we do not include any words past the current word,
as in the paper.
"""
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
add_bias_kv=False, add_zero_attn=False, self_attention=False,
encoder_decoder_attention=False, stride=32, expressivity=8, is_bidirectional=True):
super().__init__(
embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv,
add_zero_attn, self_attention, encoder_decoder_attention
)
self.is_bidirectional = is_bidirectional
self.stride = stride
self.expressivity = expressivity
assert(self.stride > 0 and self.stride >= self.expressivity)
# Used for Ai(2) calculations - beginning of [l-c, l] range
def compute_checkpoint(self, word_index):
if word_index % self.stride == 0 and word_index != 0:
checkpoint_index = word_index - self.expressivity
else:
checkpoint_index = (
math.floor(word_index / self.stride) * self.stride
+ self.stride - self.expressivity
)
return checkpoint_index
# Computes Ai(2)
def compute_subset_summaries(self, absolute_max):
checkpoint_index = self.compute_checkpoint(0)
subset_two = set()
while checkpoint_index <= absolute_max-1:
summary = set(range(checkpoint_index, min(
checkpoint_index+self.expressivity+1, absolute_max)
))
subset_two = subset_two.union(summary)
checkpoint_index = self.compute_checkpoint(checkpoint_index+self.stride)
return subset_two
# Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf
def compute_fixed_attention_subset(self, word_index, tgt_len):
# +1s account for range function; [min, max) -> [min, max]
if not self.is_bidirectional:
absolute_max = word_index + 1
else:
absolute_max = tgt_len
# Subset 1 - whole window
rounded_index = math.floor((word_index + self.stride) / self.stride) * self.stride
if word_index % self.stride == 0 and word_index != 0:
subset_one = set(range(word_index-self.stride, min(absolute_max, word_index+1)))
else:
subset_one = set(range(max(0, rounded_index - self.stride), min(
absolute_max, rounded_index+1))
)
# Subset 2 - summary per window
# If bidirectional, subset 2 is the same for every index
subset_two = set()
if not self.is_bidirectional:
subset_two = self.compute_subset_summaries(absolute_max)
return subset_one.union(subset_two)
# Compute sparse mask - if bidirectional, can pre-compute and store
def buffered_sparse_mask(self, tensor, tgt_len, src_len):
assert(tgt_len > self.stride)
sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float('-inf'))
# If bidirectional, subset 2 is the same for every index
subset_summaries = set()
if self.is_bidirectional:
subset_summaries = self.compute_subset_summaries(tgt_len)
for i in range(tgt_len):
fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len)
fixed_attention_subset = fixed_attention_subset.union(subset_summaries)
included_word_indices = torch.LongTensor(list(fixed_attention_subset))
sparse_mask[i].index_fill_(0, included_word_indices, 0)
return sparse_mask.type_as(tensor)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
sparse_mask = sparse_mask.unsqueeze(0).expand(bsz * self.num_heads, tgt_len, src_len)
attn_weights += sparse_mask
| 4,525 | 42.104762 | 100 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/statistics_utils.py
|
import numpy as np
import torch
from scipy import io
#save: save statistics in a file
#record: only record
def save_residual_proportion(self, x, residual, module):
# T,B,C
assert module in ['att','ffn'], "wrong module in residual proportion!"
if not self.training or not self.record_residual_proportion:
return
file = self.record_residual_att_file if module=='att' else self.record_residual_ffn_file
if self.step%self.save_interval==0:
self.x_proportion[file].append(x[:,0,:].norm())
self.residual_proportion[file].append(residual[:,0,:].norm())
self.total_proportion[file].append((x+residual)[:,0,:].norm())
if(self.step%self.save_interval==0):
savefile = '{}_{}.mat'.format(file,self.step//self.save_interval)
d = {}
d['x'] = np.array(self.x_proportion[file])
d['residual'] = np.array(self.residual_proportion[file])
d['total'] = np.array(self.total_proportion[file])
self.x_proportion[file] = []
self.residual_proportion[file] = []
self.total_proportion[file] = []
io.savemat(savefile,d)
def get_singular_values(cov, eps=1e-5):
#input: covariance matrix C*C
#output: singular values in increasing order: C
C,_ = cov.shape
cov += eps*torch.eye(C).cuda() #for numerical stability
s, _ = torch.symeig(cov)
return s
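# Illustrative sketch (not part of the original file): the stable rank sum(s) / max(s)
# computed from these eigenvalues, the same quantity recorded as 'r' in
# save_condition_statistics below. Assumes a CUDA device, since the helper above adds
# eps * I on CUDA.
def _stable_rank_example():
    feats = torch.randn(50, 8).cuda()               # 50 toy token features of dimension 8
    cov = feats.t().matmul(feats) / feats.shape[0]  # 8 x 8 covariance
    s = get_singular_values(cov)                    # eigenvalues in ascending order
    return (torch.sum(s) / s[-1]).item()            # stable rank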
def record_forward_weight_norm(self):
if not self.training or not self.record_weight_norm:
return
if self.step%self.record_interval==0:
w1, w2 = self.fc1.weight.data, self.fc2.weight.data
self.w1_norm.append(w1.norm())
self.w2_norm.append(w2.norm())
cov1 = torch.matmul(w1.transpose(0,1),w1)
cov2 = torch.matmul(w2,w2.transpose(0,1))
s1 = get_singular_values(cov1)
s2 = get_singular_values(cov2)
self.w_singular_value[self.record_w1_grad_file].append(s1.reshape([1,-1]))
self.w_singular_value[self.record_w2_grad_file].append(s2.reshape([1,-1]))
def save_forward_backward_weight_norm(self,savefile):
w1_norm = torch.tensor(self.w1_norm).cpu().numpy()
w2_norm = torch.tensor(self.w2_norm).cpu().numpy()
w1_grad_norm = torch.tensor(self.w_grad_norm[self.record_w1_grad_file]).cpu().numpy()
w2_grad_norm = torch.tensor(self.w_grad_norm[self.record_w2_grad_file]).cpu().numpy()
w1_singular_value = torch.cat(self.w_singular_value[self.record_w1_grad_file],dim=0).cpu().numpy()
w2_singular_value = torch.cat(self.w_singular_value[self.record_w2_grad_file],dim=0).cpu().numpy()
d = {}
d['w1_norm'] = w1_norm
d['w2_norm'] = w2_norm
d['w1_grad_norm'] = w1_grad_norm
d['w2_grad_norm'] = w2_grad_norm
d['w1_singular_value'] = w1_singular_value
d['w2_singular_value'] = w2_singular_value
file = "{}_{}.mat".format(savefile, self.step//self.save_interval)
io.savemat(file,d)
self.w1_norm = []
self.w2_norm = []
self.w_grad_norm[self.record_w1_grad_file] = []
self.w_grad_norm[self.record_w2_grad_file] = []
self.w_singular_value[self.record_w1_grad_file] = []
self.w_singular_value[self.record_w2_grad_file] = []
def save_attn_weights(self, attn_weights):
#B*T*T(tgt_len, src_len)
if not self.training or not self.record_attn_weight:
return
if self.step%self.save_interval==0:
attn_weights = attn_weights[0]
x_len = self.mask[0].sum()
x_len = x_len.int()
attn_weights = attn_weights[:x_len,:x_len] # note: check whether padding is left-pad or right-pad (potential bug!)
attn_weights = attn_weights.data.cpu().numpy()
self.attn_weight_list.append(attn_weights)
if(self.step%self.save_interval==0):
file = '{}_{}.mat'.format(self.attn_weight_savefile,self.step//self.save_interval)
d = {}
d['attn_weight'] = np.array(self.attn_weight_list)
self.attn_weight_list = []
io.savemat(file,d)
def dominated_word(self, x, position, process):
#x: T,B,C
copyx = x
copyx = copyx.transpose(0,1) #B,T,C
norms = copyx.norm(dim=-1) #B,T
self.d_norm[process][position].append(norms.norm().cpu().numpy().item())
#dominated word
v, _ = norms.max(dim=-1,keepdim=True) #B,1
norms = norms/v
mean = norms.mean(dim=-1) #B
index = torch.argmax(norms,dim=-1,keepdim=True) #B,1  argmax alone is not enough; the top-1/top-2 ratio should also be large, hence the topk below
word = self.src_tokens.gather(dim=-1,index=index)
value2, _ = torch.topk(norms,k=2) #B,2
self.d_dominated_word[process][position].append(word.cpu().numpy().reshape([-1,]))
self.d_dominated_index[process][position].append(index.cpu().numpy().reshape([-1,]))
self.d_dominated_r[process][position].append(mean.cpu().numpy().reshape([-1,]))
self.d_dominated_top2_value[process][position].append(value2.cpu().numpy())
def zero_rate(self, x, position, process):
T,B,C = x.shape
copyx = x
num_tokens = self.mask.sum()
num_pads = B*T*C-num_tokens*C
copyx = copyx*(self.mask.transpose(0,1))
num_zeros = (copyx==0).sum()
num_zero_neurous = (((copyx==0).sum(dim=0))==T).sum()
num_zero_words = (((copyx==0).sum(dim=-1))==C).sum()
num_zero_words = num_zero_words-(B*T-num_tokens)
num_pads = num_pads.type_as(num_zeros)
num_zeros = num_zeros-num_pads
num_zeros = num_zeros.cpu().numpy().item()
num_tokens = num_tokens.cpu().numpy().item()
num_zero_neurous = num_zero_neurous.cpu().numpy().item()
num_zero_words = num_zero_words.cpu().numpy().item()
r = num_zeros/num_tokens/C
r_neuron = num_zero_neurous/B/C
r_word = num_zero_words/num_tokens
self.d_zero_rate[process][position].append([r, r_neuron, r_word])
def norm_distribution(self, x, position, process):
#print(copyx.shape)
copyx = x.transpose(0,1) #B,T,C
items = min(copyx.shape[0],2)
for i in range(items):
temx = copyx[i] #T,C
len_x = self.mask[i].sum()
len_x = len_x.int()
temx = temx[:len_x] #len_x, C
bag = torch.zeros(self.max_len)
bag[:len_x] = torch.norm(temx,dim=1)
bag[-1] = len_x.float()
self.d_norm_distribution[process][position].append(bag.reshape([1,-1])) # 1-D array
def save_norm_statistics(self, x, position, process):
#T,B,C
if len(self.record_parts[process])==0:
return
if 'norm_distribution' in self.norm_items[process]:
norm_distribution(self, x, position, process)
if 'dominated_word' in self.norm_items[process]:
dominated_word(self, x, position, process)
if 'zero_rate' in self.norm_items[process]:
zero_rate(self, x, position, process)
if(self.step%self.save_interval==0):
d = {}
if 'norm_distribution' in self.norm_items[process]:
save_norm_distribution = torch.cat(self.d_norm_distribution[process][position],dim=0).cpu().numpy()
d['norm_distribution'] = save_norm_distribution
self.d_norm_distribution[process][position] = []
if 'dominated_word' in self.norm_items[process]:
save_dominated_word = np.concatenate(self.d_dominated_word[process][position])
save_dominated_index = np.concatenate(self.d_dominated_index[process][position])
save_dominated_r = np.concatenate(self.d_dominated_r[process][position])
save_dominated_top2_value = np.concatenate(self.d_dominated_top2_value[process][position],axis=0)
save_norm = np.array(self.d_norm[process][position])
d['dominated_word'] = save_dominated_word
d['dominated_index'] = save_dominated_index
d['dominated_r'] = save_dominated_r
d['dominated_top2_value'] = save_dominated_top2_value
d['norm'] = save_norm
self.d_dominated_word[process][position] = []
self.d_dominated_index[process][position] = []
self.d_dominated_r[process][position] = []
self.d_dominated_top2_value[process][position] = []
self.d_norm[process][position] = []
if 'zero_rate' in self.norm_items[process]:
r_list = [i[0] for i in self.d_zero_rate[process][position]]
r_neuron_list = [i[1] for i in self.d_zero_rate[process][position]]
r_word_list = [i[2] for i in self.d_zero_rate[process][position]]
save_zero_rate = np.array(r_list)
save_zero_neuron_rate = np.array(r_neuron_list)
save_zero_word_rate = np.array(r_word_list)
d['zero_rate'] = save_zero_rate
d['zero_neuron_rate'] = save_zero_neuron_rate
d['zero_word_rate'] = save_zero_word_rate
self.d_zero_rate[process][position] = []
file = "{}_{}.mat".format(self.d_savefile[process][position],self.step//self.save_interval)
io.savemat(file,d)
def save_condition_statistics(self, x, position, process):
#T,B,C
if len(self.condition_items[process])==0:
return
eps = 1e-5
T, B, C = x.shape
x_len = self.mask.sum(dim=(1,2)) #B
r = torch.zeros(1).cuda()
c_max = torch.zeros(1).cuda()
c_20 = torch.zeros(1).cuda()
c_50 = torch.zeros(1).cuda()
c_80 = torch.zeros(1).cuda()
r_total = torch.zeros(1).cuda()
intra_average_sim = torch.zeros(1).cuda()
inter_average_sim = torch.zeros(1).cuda()
total_average_sim = torch.zeros(1).cuda()
s = torch.zeros(1).cuda()
if 'r' in self.condition_items[process]:
rx = x.transpose(0,1) #B,T,C
#cov_r = torch.bmm(rx.transpose(1,2),rx) #B,C,C
iters = min(rx.shape[0],1)
for i in range(iters):
feature = rx[i] #T,C
cov_r = torch.matmul(feature.transpose(0,1),feature)/x_len[i]
cov_r += torch.eye(C).cuda() #for numerical stability
try:
s, _ = torch.symeig(cov_r) # eigenvalues in ascending order; the eigendecomposition is expensive
except:
print("eigenvalue decomposition error when computing stable rank! ")
print(cov_r)
print(feature[:,1].sum())
print(cov_r.shape)
print(x_len[i])
#print(s)
exit()
s = torch.ones(cov_r.size(1)).cuda()
temr = torch.sum(s)/s[-1] #square of singular values
r += temr
r = r/iters
#r = r.cpu().numpy().item()
#start_time = time.time()
if 'c_max' in self.condition_items[process] or 'r_total' in self.condition_items[process]:
yx = x.transpose(0,1) #B,T,C
y = yx.reshape(-1,yx.shape[-1]) #(B*T)*C
#y = y[index] # drop padded words
cov = torch.matmul(y.transpose(0,1),y)/y.shape[0] #C*C C=512
try:
s, _ = torch.symeig(cov) # eigenvalues in ascending order; the eigendecomposition is expensive
except:
print("eigenvalue decomposition error!")
s = torch.ones(cov.size(1)).cuda()
r_total = (torch.sum(s)/s[-1]) #square of singular values
c_max = s[-1]
c_20 = (c_max/s[len(s)*4//5])
c_50 = (c_max/s[len(s)//2])
c_80 = (c_max/s[len(s)*1//5])
#print("cov time: {}".format(time.time() - start_time))
#start_time = time.time()
if 'intra_average_sim' in self.condition_items[process]:
sx = x.transpose(0,1) #B,T,C
sim_items = B//4
sx = sx[:sim_items]
y = sx.reshape(-1,sx.shape[-1]) #(sim_items*T)*C
y = y/(y.norm(dim=-1,keepdim=True)+eps)
sim = torch.matmul(y, y.transpose(0,1)) #sim_items*T,sim_items*T
#print("sim 1 time: {}".format(time.time() - start_time))
#start_time = time.time()
intra_sim = 0
items_len = x_len[:sim_items].reshape([1,-1])
for i in range(sim_items):
temsim = torch.sum(sim[T*i:T*(i+1),T*i:T*(i+1)])
intra_sim += temsim
dim = torch.sum(items_len)
inter_sim = torch.sum(sim)-intra_sim
intra_sim -= dim
total_average_sim = inter_sim+intra_sim
intra_items = torch.sum(items_len**2)-torch.sum(items_len)
total_items = dim*dim-dim
inter_items = total_items-intra_items
intra_average_sim = intra_sim/intra_items
inter_average_sim = inter_sim/inter_items
total_average_sim = total_average_sim/total_items
#print("sim 2 time: {}".format(time.time() - start_time))
#start_time = time.time()
collect_items = [c_max, c_20, c_50, c_80, r_total, r, intra_average_sim, inter_average_sim, total_average_sim]
self.d_condition_number[process][position].append(collect_items)
self.d_condition_singular_value[process][position].append(s.reshape([1,-1]))
if self.step%self.save_interval==0:
d = {}
for i, name in enumerate(self.condition_items[process]):
d[name] = np.array([b[i].cpu().numpy().item() for b in self.d_condition_number[process][position]])
singular_value = torch.cat(self.d_condition_singular_value[process][position],dim=0).cpu().numpy()
d['condition_singular_value'] = singular_value[::10] # keeping every 10th row is enough
self.d_condition_number[process][position] = []
self.d_condition_singular_value[process][position] = []
file = "{}_{}.mat".format(self.d_condition_savefile[process][position],self.step//self.save_interval)
io.savemat(file,d)
def probe_hook(self, position, process):
def hook_template(x):
copyx = x.data.clone()
copyx = copyx*self.mask.transpose(0,1)
#x can be forward features or backward gradient
if 'norm' in self.record_parts[process]:
save_norm_statistics(self, copyx, position, process)
if 'condition' in self.record_parts[process]:
save_condition_statistics(self, copyx, position, process)
return hook_template
def insert_forward_probe(self, x, position):
return
if self.record_process['forward'] and position in self.probe_positions["forward"]:
cur_probe_hook = probe_hook(self, position, process="forward")
cur_probe_hook(x)
def insert_backward_probe(self, x, position):
return
if self.record_process['backward'] and position in self.probe_positions["backward"]:
cur_probe_hook = probe_hook(self, position, process="backward")
x.register_hook(cur_probe_hook)
def insert_probe_unify(self, x, position, process):
assert process in ['forward', 'backward'], "wrong process(insert_probe_unify)"
if self.record_process[process] and position in self.probe_positions[process]:
cur_probe_hook = probe_hook(self, position, process=process)
if process=='forward':
cur_probe_hook(x)
else:
x.register_hook(cur_probe_hook)
def insert_probe(self, x, position):
#includ forward & backward probe
if not self.training or self.step%self.record_interval!=0:
return
insert_probe_unify(self, x, position, process='forward')
insert_probe_unify(self, x, position, process='backward')
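# Illustrative sketch (not part of the original file): the probe pattern used by
# insert_probe_unify above. A forward probe calls the hook on the activation directly,
# while a backward probe attaches the same hook with register_hook so that it fires on
# the gradient during the backward pass.
def _probe_pattern_example():
    stats = {'norms': []}
    def hook(t):
        stats['norms'].append(t.norm().item())  # record the norm of whatever tensor arrives
    x = torch.randn(4, 3, requires_grad=True)
    hook(x.data)             # "forward" probe: inspect the activation
    x.register_hook(hook)    # "backward" probe: inspect the gradient
    x.sum().backward()
    return stats             # two entries: activation norm and gradient norm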
| 14,971 | 42.397101 | 115 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/multihead_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
#from fairseq.modules import CWN
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
__count = 0
def update_cnt(self):
MultiheadAttention.__count += 1
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
penalty=0,
normalize_q=False,
normalize_k=False,
normalize_v=False,
g0=10,
fix_g0=False,
):
super().__init__()
self.id = self.__count # assign a unique id to each attention module
self.update_cnt()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
self.penalty = penalty
self.normalize_q = normalize_q
self.normalize_k = normalize_k
self.normalize_v = normalize_v
self.fix_g0 = fix_g0
self.eps = 1e-5
if self.normalize_q or self.normalize_k:
self.g0 = Parameter(torch.Tensor(1))
nn.init.constant_(self.g0, g0)
if self.fix_g0:
self.g0 = self.g0.cuda().detach()
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size)
self.v_proj = quant_noise(nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size)
self.q_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size)
#self.k_proj = CWN(self.kdim, embed_dim, NScale = -1, adjustScale=1)
#self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
#self.q_proj = CWN(embed_dim, embed_dim, NScale = -1, adjustScale=1)
self.out_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters() # NOTE: remove this call when using CWN!!!
self.onnx_trace = False
self.tpu = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def prepare_for_tpu_(self, **kwargs):
self.tpu = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) # weight is the same as weight_v
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def orth_loss(self, attention_score):
#this seems computationally expensive
#an extra mask is needed: masked positions may be -inf and cannot be multiplied directly
#the implementation is incomplete
B, T, _ = attention_score.shape
cov = torch.bmm(attention_score.transpose(1,2),attention_score)
cur_loss = 0
#print("orth loss:",cur_loss)
return cur_loss
def norm_irrelevant_attention_weight(self,q,k):
#q: B, tgt_len, head_dim
#k: B, src_len, head_dim
q = q/(q.norm(dim=-1,keepdim=True)+self.eps)
k = k/(k.norm(dim=-1,keepdim=True)+self.eps)
attn_weights = torch.bmm(q,k.transpose(1,2))
return attn_weights
def loss(self):
loss = 0
assert self.training==True, "wrongly adding attention loss!"
if self.self_attention and self.id<6 and self.penalty>0:
loss += self.penalty*self.orth_loss(self.attention_score)
return loss
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if (
not self.onnx_trace
and not self.tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
): #cross:false static_kv=true
assert key is not None and value is not None
#print("here");exit() #indeed here!
#a = torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias))
#print("a")
#the built-in dropout sets weights to 0 rather than -inf; that is fine when applied to the post-softmax weights.
'''
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
a,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
'''
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
if not self.normalize_q and not self.normalize_k:
q *= self.scaling
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
src_len = k.size(1)
if (self.self_attention and self.id<6):
if self.normalize_q:
q = q/(q.norm(dim=-1,keepdim=True)+self.eps)
q *= self.g0
if self.normalize_k:
k = k/(k.norm(dim=-1,keepdim=True)+self.eps)
if self.normalize_v:
v = v/(v.norm(dim=-1,keepdim=True)+self.eps)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
#print(attn_mask);print(attn_mask.shape);print(attn_weights.shape);exit()
#attn_mask = attn_mask.repeat(attn_weights.size(0),1,1)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf")
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
self.attention_score = attn_weights #for addtional loss
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
#print("here");exit() #here
if incremental_state is not None: #cross:false
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention: #cross: true
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None: #false
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None: #cross: false
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0: #cross:false
key_padding_mask = None
if key_padding_mask is not None: #cross:true(unnecessary code)
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:#cross:false
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) #nothing done
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None: #cross: false
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None: #cross: true
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not self.tpu: #true
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf")
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf'))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax: #false
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1: #cross: false
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
| 23,984 | 40.070205 | 110 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/transpose_last.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
transpose last 2 dimensions of the input
"""
import torch.nn as nn
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
| 550 | 25.238095 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/norm_select.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .norm.mask_layernorm3d import MaskLayerNorm3d
from .norm.mask_batchnorm3d import MaskBatchNorm3d
from .norm.mask_powernorm3d import MaskPowerNorm3d
from .norm.mask_groupnorm import GroupNorm
from .norm.mask_groupscale import MaskGroupScale
from .norm.mask_identity import MaskIdentityNorm
from torch import nn
def parse_norm(norm):
args = norm.split("_")
return args
def NormSelect(norm_type, embed_dim, layer_id=-1, prefix='None'):
#print(norm_type)
args = parse_norm(norm_type)
norm_type = args[0]
if norm_type == "layer":
if len(args)==1:
print("nn.layernorm")
return nn.LayerNorm(embed_dim)
#return MaskLayerNorm3d(embed_dim)
elif len(args)==2:
return MaskLayerNorm3d(embed_dim, affine=int(args[1]))
else:
return MaskLayerNorm3d(embed_dim, affine=int(args[1]), square=int(args[2]))
elif norm_type == "identity":
return MaskIdentityNorm(embed_dim)
elif norm_type == "group":
#assert len(args)==6, "wrong groupnorm argument!"
return GroupNorm(embed_dim, num_groups=int(args[1]))
#return GroupNorm(embed_dim, num_groups=int(args[1]), affine=int(args[2]) ,subtract_type=args[3], robust_mean=int(args[4]),robust_std=int(args[5]))
elif norm_type == "power":
if args[-1]=='select':
if args[-2].find(str(layer_id))>=0:
return MaskPowerNorm3d(embed_dim, prefix=prefix, penalty_var=float(args[1]))
else:
return MaskLayerNorm3d(embed_dim, affine=1)
if len(args)==1:
return MaskPowerNorm3d(embed_dim, prefix=prefix)
elif len(args)==2:
return MaskPowerNorm3d(embed_dim, prefix=prefix, penalty_var=float(args[1]))
elif norm_type == "batch":
# return MaskBatchNorm(embed_dim)
if len(args)==3:
return MaskBatchNorm3d(embed_dim, affine=int(args[1]), with_seq=int(args[2]), prefix=prefix)
elif len(args)==4:
return MaskBatchNorm3d(embed_dim, penalty_type=args[1], penalty_mean=float(args[2]), penalty_var=float(args[3]), prefix=prefix)
else:
print("error len BN!")
elif norm_type == "groupscale":
print("groupscale")
return MaskGroupScale(embed_dim, group_num=8)
else:
print("error NormSelect!")
exit()
#elif norm_type == 'power':
# return MaskPowerNorm(embed_dim, group_num=head_num, warmup_iters=warmup_updates)
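# Illustrative sketch (not part of the original file): the norm_type string is split on
# underscores; the first token selects the normalization family and the remaining tokens
# are its options. Only the "layer" case (plain nn.LayerNorm) is exercised here.
def _norm_select_example():
    assert parse_norm("batch_user_0.1_0.2") == ["batch", "user", "0.1", "0.2"]
    norm = NormSelect("layer", 512)  # -> nn.LayerNorm(512)
    return norm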
| 2,685 | 40.323077 | 155 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/same_pad.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
class SamePad(nn.Module):
def __init__(self, kernel_size):
super().__init__()
self.remove = kernel_size % 2 == 0
def forward(self, x):
if self.remove:
x = x[:, :, :-1]
return x
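# Illustrative sketch (not part of the original module): with an even kernel size, "same"
# padding of kernel_size // 2 produces one extra output step, which SamePad trims away.
def _same_pad_example():
    import torch
    conv = nn.Conv1d(8, 8, kernel_size=4, padding=2)
    x = torch.rand(1, 8, 10)
    y = conv(x)                    # shape (1, 8, 11): one step too long (even kernel)
    y = SamePad(kernel_size=4)(y)  # back to (1, 8, 10)
    return y.shape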
| 432 | 21.789474 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/multihead_attention_simple.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
#from fairseq.modules import CWN
class RelativeEmbedding(nn.Module):
def forward(self, input):
"""Input is expected to be of size [bsz x seqlen].
"""
bsz, seq_len = input.size()
max_pos = self.padding_idx + seq_len
        if max_pos > self.origin_shift: # allocate more embeddings up front so this case does not occur
# recompute/expand embeddings if needed
weights = self.get_embedding(
max_pos*2,
self.embedding_dim,
self.padding_idx,
)
weights = weights.to(self._float_tensor)
del self.weights
self.origin_shift = weights.size(0)//2
self.register_buffer('weights', weights)
positions = torch.arange(-seq_len, seq_len).to(input.device).long() + self.origin_shift # 2*seq_len
embed = self.weights.index_select(0, positions.long()).detach()
return embed
class RelativeSinusoidalPositionalEmbedding(RelativeEmbedding):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1536):
"""
        :param embedding_dim: dimension of each position embedding
:param padding_idx:
:param init_size:
"""
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
assert init_size%2==0
weights = self.get_embedding(
init_size+1,
embedding_dim,
padding_idx,
)
self.register_buffer('weights', weights)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(-num_embeddings//2, num_embeddings//2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
self.origin_shift = num_embeddings//2 + 1
return emb
class MultiheadAttentionSimple(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
__count = 0
def update_cnt(self):
MultiheadAttentionSimple.__count += 1
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
penalty=0,
normalize_q=False,
normalize_k=False,
normalize_v=False,
g0=10,
fix_g0=False,
):
super().__init__()
        self.id = self.__count # assign a unique id to each attention module
self.update_cnt()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = nn.Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = True
self.penalty = penalty
self.normalize_q = normalize_q
self.normalize_k = normalize_k
self.normalize_v = normalize_v
self.fix_g0 = fix_g0
self.eps = 1e-5
if self.normalize_q or self.normalize_k:
self.g0 = Parameter(torch.Tensor(1))
nn.init.constant_(self.g0, g0)
if self.fix_g0:
self.g0 = self.g0.cuda().detach()
assert self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
#self.k_proj = CWN(self.kdim, embed_dim, NScale = -1, adjustScale=1)
#self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
#self.q_proj = CWN(embed_dim, embed_dim, NScale = -1, adjustScale=1)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
        self.reset_parameters() # note: remove this call when using CWN!
self.pos_embed = RelativeSinusoidalPositionalEmbedding(self.head_dim, 0, 1200)
self.r_r_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(num_heads, self.head_dim)))
self.r_w_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(num_heads, self.head_dim)))
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) # weight is the same as weight_v
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def orth_loss(self, attention_score):
        # the computation here seems too expensive
        # an extra mask is needed: masked positions may be -inf, so they cannot be multiplied directly
        # this code is incomplete
B, T, _ = attention_score.shape
cov = torch.bmm(attention_score.transpose(1,2),attention_score)
cur_loss = 0
#print("orth loss:",cur_loss)
return cur_loss
def norm_irrelevant_attention_weight(self,q,k):
#q: B, tgt_len, head_dim
#k: B, src_len, head_dim
q = q/(q.norm(dim=-1,keepdim=True)+self.eps)
k = k/(k.norm(dim=-1,keepdim=True)+self.eps)
attn_weights = torch.bmm(q,k.transpose(1,2))
return attn_weights
def loss(self):
loss = 0
assert self.training==True, "wrongly adding attention loss!"
if self.self_attention and self.id<6 and self.penalty>0:
loss += self.penalty*self.orth_loss(self.attention_score)
return loss
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
if not self.normalize_q and not self.normalize_k:
q *= self.scaling
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) #b*n, l,d
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) #b*n, l,d
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) #b*n, l,d
src_len = k.size(1)
if self.id<6:
if self.normalize_q:
q = q/(q.norm(dim=-1,keepdim=True)+self.eps)
q *= self.g0
if self.normalize_k:
k = k/(k.norm(dim=-1,keepdim=True)+self.eps)
if self.normalize_v:
v = v/(v.norm(dim=-1,keepdim=True)+self.eps)
attn_weights = torch.bmm(q, k.transpose(1, 2))
pos_embed = self.pos_embed(key_padding_mask) # l x head_dim
        D_ = torch.einsum('nd,ld->nl', self.r_w_bias, pos_embed)[None, :, None] # head x 2max_len, per-head bias for each relative position
        B_ = torch.einsum('bnqd,ld->bnql', q.view(bsz, self.num_heads, tgt_len, self.head_dim), pos_embed) # bsz x head x max_len x 2max_len, offset of each query w.r.t. each shift
        E_ = torch.einsum('bnqd,ld->bnql', k.view(bsz, self.num_heads, tgt_len, self.head_dim), pos_embed) # bsz x head x max_len x 2max_len, bias of each key w.r.t. the relative position
        BD = B_ + D_ # bsz x head x max_len x 2max_len, to be converted to bsz x head x max_len x max_len
BDE = self._shift(BD) + self._transpose_shift(E_)
BDE = BDE.contiguous().view(bsz * self.num_heads, tgt_len, tgt_len)
attn_weights += BDE*self.scaling
if attn_mask is not None:
#print(attn_mask);print(attn_mask.shape);print(attn_weights.shape);exit()
#attn_mask = attn_mask.repeat(attn_weights.size(0),1,1)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf")
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights_float = F.softmax(attn_weights, dim=-1, dtype=torch.float32)
attn_weights = attn_weights_float.type_as(attn_weights)
        self.attention_score = attn_weights #for additional loss
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
def _shift(self, BD):
"""
        Converts something like
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
        into
0 1 2
-1 0 1
-2 -1 0
:param BD: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.size()
zero_pad = BD.new_zeros(bsz, n_head, max_len, 1)
BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) # bsz x n_head x (2max_len+1) x max_len
BD = BD[:, :, :-1].view(bsz, n_head, max_len, -1) # bsz x n_head x 2max_len x max_len
BD = BD[:, :, :, max_len:]
return BD
def _transpose_shift(self, E):
"""
        Converts something like
-3 -2 -1 0 1 2
-30 -20 -10 00 10 20
-300 -200 -100 000 100 200
        into
0 -10 -200
1 00 -100
2 10 000
:param E: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = E.size()
zero_pad = E.new_zeros(bsz, n_head, max_len, 1)
# bsz x n_head x -1 x (max_len+1)
E = torch.cat([E, zero_pad], dim=-1).view(bsz, n_head, -1, max_len)
indice = (torch.arange(max_len)*2+1).to(E.device)
E = E.index_select(index=indice, dim=-2).transpose(-1,-2) # bsz x n_head x max_len x max_len
return E
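# --- Illustrative sketch (added for clarity; not part of the original fairseq file). ---
# A minimal, self-contained demo of the realignment performed by _shift above: row i of the
# output keeps the scores for relative offsets (j - i). The helper name and sizes are hypothetical.
def _shift_demo():
    bsz, n_head, max_len = 1, 1, 3
    # every row holds the scores for offsets -3 .. 2, as in the docstring of _shift
    BD = torch.arange(-max_len, max_len).float().repeat(bsz, n_head, max_len, 1)
    zero_pad = BD.new_zeros(bsz, n_head, max_len, 1)
    out = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len)
    out = out[:, :, :-1].view(bsz, n_head, max_len, -1)[:, :, :, max_len:]
    # out[0, 0] == [[0, 1, 2], [-1, 0, 1], [-2, -1, 0]], matching the docstring above
    return out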
| 13,929 | 37.480663 | 161 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/linearized_convolution.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from .conv_tbc import ConvTBC
from fairseq.incremental_decoding_utils import with_incremental_state
@with_incremental_state
class LinearizedConvolution(ConvTBC):
"""An optimized version of nn.Conv1d.
At training time, this module uses ConvTBC, which is an optimized version
of Conv1d. At inference time, it optimizes incremental generation (i.e.,
one time step at a time) by replacing the convolutions with linear layers.
Note that the input order changes from training to inference.
"""
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self._linearized_weight = None
self.register_backward_hook(self._clear_linearized_weight)
def state_dict(self, destination=None, prefix='', keep_vars=False):
state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars)
# don't store redundant _linearized_weight in checkpoints
if prefix + '_linearized_weight' in state:
del state[prefix + '_linearized_weight']
return state
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + '.' if name != '' else ''
if prefix + '_linearized_weight' in state_dict:
del state_dict[prefix + '_linearized_weight']
def forward(self, input, incremental_state=None):
"""
Args:
incremental_state: Used to buffer signal; if not None, then input is
expected to contain a single frame. If the input order changes
between time steps, call reorder_incremental_state.
Input:
Time x Batch x Channel during training
Batch x Time x Channel during inference
"""
if incremental_state is None:
output = super().forward(input)
if self.kernel_size[0] > 1 and self.padding[0] > 0:
# remove future timesteps added by padding
output = output[:-self.padding[0], :, :]
return output
# reshape weight
weight = self._get_linearized_weight()
kw = self.kernel_size[0]
bsz = input.size(0) # input: bsz x len x dim
if kw > 1:
input = input.data
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = input.new(bsz, kw, input.size(2)).zero_()
self._set_input_buffer(incremental_state, input_buffer)
else:
# shift buffer
input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()
# append next input
input_buffer[:, -1, :] = input[:, -1, :]
input = input_buffer
with torch.no_grad():
output = F.linear(input.view(bsz, -1), weight, self.bias)
return output.view(bsz, 1, -1)
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, 'input_buffer')
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
def _get_linearized_weight(self):
if self._linearized_weight is None:
kw = self.kernel_size[0]
weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
assert weight.size() == (self.out_channels, kw, self.in_channels)
self._linearized_weight = torch.nn.Parameter(weight.view(self.out_channels, -1))
return self._linearized_weight
def _clear_linearized_weight(self, *args):
self._linearized_weight = None
| 4,261 | 41.19802 | 95 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/downsampled_multihead_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules.scalar_bias import scalar_bias
from fairseq.modules.fairseq_dropout import FairseqDropout
class SingleHeadAttention(nn.Module):
"""
Single-head attention that supports Gating and Downsampling
"""
def __init__(
self, out_channels, embed_dim, head_dim, head_index, dropout=0.,
bias=True, project_input=True, gated=False, downsample=False,
num_heads=1,
):
super().__init__()
self.embed_dim = embed_dim
self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
self.head_index = head_index
self.head_dim = head_dim
self.project_input = project_input
self.gated = gated
self.downsample = downsample
self.num_heads = num_heads
self.projection = None
k_layers = []
v_layers = []
if self.downsample:
k_layers.append(Downsample(self.head_index))
v_layers.append(Downsample(self.head_index))
out_proj_size = self.head_dim
else:
out_proj_size = self.head_dim * self.num_heads
if self.gated:
k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias)
v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
else:
k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias)
v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
self.in_proj_k = nn.Sequential(*k_layers)
self.in_proj_v = nn.Sequential(*v_layers)
if self.downsample:
self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias)
else:
self.out_proj = Linear(out_proj_size, out_channels, bias=bias)
self.scaling = self.head_dim**-0.5
def forward(
self, query, key, value, mask_future_timesteps=False,
key_padding_mask=None, use_scalar_bias=False,
):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`mask_future_timesteps` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
src_len, bsz, out_channels = key.size()
tgt_len = query.size(0)
assert list(query.size()) == [tgt_len, bsz, out_channels]
assert key.size() == value.size()
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.downsample:
size = bsz
else:
size = bsz * self.num_heads
k = key
v = value
q = query
if self.project_input:
q = self.in_proj_q(q)
k = self.in_proj_k(k)
v = self.in_proj_v(v)
src_len = k.size()[0]
q *= self.scaling
if not self.downsample:
q = q.view(tgt_len, size, self.head_dim)
k = k.view(src_len, size, self.head_dim)
v = v.view(src_len, size, self.head_dim)
q = q.transpose(0, 1)
k = k.transpose(0, 1)
v = v.transpose(0, 1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if mask_future_timesteps:
assert query.size() == key.size(), \
'mask_future_timesteps only applies to self-attention'
attn_weights *= torch.tril(
attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(),
diagonal=-1,
)[:, ::self.head_index + 1 if self.downsample else 1].unsqueeze(0)
attn_weights += torch.triu(
attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(),
diagonal=0
)[:, ::self.head_index + 1 if self.downsample else 1].unsqueeze(0)
tgt_size = tgt_len
if use_scalar_bias:
attn_weights = scalar_bias(attn_weights, 2)
v = scalar_bias(v, 1)
tgt_size += 1
if key_padding_mask is not None:
# don't attend to padding symbols
if key_padding_mask.max() > 0:
if self.downsample:
attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len)
else:
attn_weights = attn_weights.view(size, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
-math.inf,
)
attn_weights = attn_weights.view(size, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
attn_weights = self.dropout_module(attn_weights)
attn = torch.bmm(attn_weights, v)
if self.downsample:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
attn = self.out_proj(attn)
return attn, attn_weights
class DownsampledMultiHeadAttention(nn.ModuleList):
"""
Multi-headed attention with Gating and Downsampling
"""
def __init__(
self, out_channels, embed_dim, num_heads, dropout=0., bias=True,
project_input=True, gated=False, downsample=False,
):
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.downsample = downsample
self.gated = gated
self.project_input = project_input
assert self.head_dim * num_heads == embed_dim
if self.downsample:
attention_heads = []
for index in range(self.num_heads):
attention_heads.append(
SingleHeadAttention(
out_channels, self.embed_dim, self.head_dim, index,
dropout, bias, self.project_input, self.gated,
self.downsample, self.num_heads,
)
)
super().__init__(modules=attention_heads)
self.out_proj = Linear(embed_dim, out_channels, bias=bias)
else:
# either we have a list of attention heads, or just one attention head
# if not being downsampled, we can do the heads with one linear layer instead of separate ones
super().__init__()
self.attention_module = SingleHeadAttention(
out_channels, self.embed_dim, self.head_dim, 1, dropout,
bias, self.project_input, self.gated, self.downsample, self.num_heads,
)
def forward(
self, query, key, value, mask_future_timesteps=False,
key_padding_mask=None, use_scalar_bias=False,
):
src_len, bsz, embed_dim = key.size()
tgt_len = query.size(0)
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
tgt_size = tgt_len
if use_scalar_bias:
tgt_size += 1
attn = []
attn_weights = []
if self.downsample:
for attention_head_number in range(self.num_heads):
# call the forward of each attention head
_attn, _attn_weight = self[attention_head_number](
query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias,
)
attn.append(_attn)
attn_weights.append(_attn_weight)
full_attn = torch.cat(attn, dim=2)
full_attn = self.out_proj(full_attn)
return full_attn, attn_weights[0].clone()
else:
_attn, _attn_weight = self.attention_module(
query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias,
)
attn.append(_attn)
attn_weights.append(_attn_weight)
full_attn = torch.cat(attn, dim=2)
full_attn_weights = torch.cat(attn_weights)
full_attn_weights = full_attn_weights.view(bsz, self.num_heads, tgt_size, src_len)
full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads
return full_attn, full_attn_weights
class Downsample(nn.Module):
"""
    Selects every (index + 1)-th element along the first (time) dimension
"""
def __init__(self, index):
super().__init__()
self.index = index
def forward(self, x):
return x[::self.index+1]
def Linear(in_features, out_features, dropout=0., bias=True):
"""Weight-normalized Linear layer (input: B x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return nn.utils.weight_norm(m)
def GatedLinear(in_features, out_features, dropout=0., bias=True):
"""Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units"""
return nn.Sequential(
Linear(in_features, out_features*4, dropout, bias),
nn.GLU(),
Linear(out_features*2, out_features*2, dropout, bias),
nn.GLU(),
Linear(out_features, out_features, dropout, bias)
)
| 9,863 | 37.381323 | 106 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/multihead_attention_relative.py
|
import torch
from torch import nn
import torch.nn.functional as F
import math
class RelativeEmbedding(nn.Module):
def forward(self, input):
"""Input is expected to be of size [bsz x seqlen].
"""
bsz, seq_len = input.size()
max_pos = self.padding_idx + seq_len
        if max_pos > self.origin_shift: # allocate more embeddings up front so this case does not occur
# recompute/expand embeddings if needed
weights = self.get_embedding(
max_pos*2,
self.embedding_dim,
self.padding_idx,
)
weights = weights.to(self._float_tensor)
del self.weights
self.origin_shift = weights.size(0)//2
self.register_buffer('weights', weights)
positions = torch.arange(-seq_len, seq_len).to(input.device).long() + self.origin_shift # 2*seq_len
embed = self.weights.index_select(0, positions.long()).detach()
return embed
class RelativeSinusoidalPositionalEmbedding(RelativeEmbedding):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1536):
"""
        :param embedding_dim: dimension of each position embedding
:param padding_idx:
:param init_size:
"""
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
assert init_size%2==0
weights = self.get_embedding(
init_size+1,
embedding_dim,
padding_idx,
)
self.register_buffer('weights', weights)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(-num_embeddings//2, num_embeddings//2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
self.origin_shift = num_embeddings//2 + 1
return emb
class RelativeMultiHeadAttn(nn.Module):
def __init__(self, d_model, n_head, dropout=0.0, r_w_bias=None, r_r_bias=None, scale=True):
"""
:param int d_model:
:param int n_head:
        :param dropout: dropout applied to the attention map
        :param r_w_bias: n_head x head_dim or None; if None, a new bias parameter is created
:param r_r_bias: n_head x head_dim or None,
:param scale:
:param rel_pos_embed:
"""
super().__init__()
self.qkv_linear = nn.Linear(d_model, d_model * 3, bias=False)
self.n_head = n_head
self.head_dim = d_model // n_head
self.dropout_layer = nn.Dropout(dropout)
self.pos_embed = RelativeSinusoidalPositionalEmbedding(d_model//n_head, 0, 1200)
if scale:
self.scale = math.sqrt(d_model // n_head)
else:
self.scale = 1
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(n_head, d_model // n_head)))
self.r_w_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(n_head, d_model // n_head)))
else:
            self.r_r_bias = r_r_bias # r_r_bias is the v vector
            self.r_w_bias = r_w_bias # r_w_bias is the u vector
def forward(self, x, mask):
"""
:param x: batch_size x max_len x d_model
:param mask: batch_size x max_len
:return:
"""
batch_size, max_len, d_model = x.size()
pos_embed = self.pos_embed(mask) # l x head_dim
        qkv = self.qkv_linear(x) # batch_size x max_len x (d_model*3)
q, k, v = torch.chunk(qkv, chunks=3, dim=-1)
q = q.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
k = k.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
v = v.view(batch_size, max_len, self.n_head, -1).transpose(1, 2) # b x n x l x d
rw_head_q = q + self.r_r_bias[:, None]
        AC = torch.einsum('bnqd,bnkd->bnqk', [rw_head_q, k]) # b x n x l x l, n is the number of heads
        D_ = torch.einsum('nd,ld->nl', self.r_w_bias, pos_embed)[None, :, None] # head x 2max_len, per-head bias for each relative position
        B_ = torch.einsum('bnqd,ld->bnql', q, pos_embed) # bsz x head x max_len x 2max_len, offset of each query w.r.t. each shift
        E_ = torch.einsum('bnqd,ld->bnql', k, pos_embed) # bsz x head x max_len x 2max_len, bias of each key w.r.t. the relative position
        BD = B_ + D_ # bsz x head x max_len x 2max_len, to be converted to bsz x head x max_len x max_len
BDE = self._shift(BD) + self._transpose_shift(E_)
attn = AC + BDE
attn = attn / self.scale
attn = attn.masked_fill(mask[:, None, None, :].eq(1), float('-inf'))
attn = F.softmax(attn, dim=-1)
attn = self.dropout_layer(attn)
v = torch.matmul(attn, v).transpose(1, 2).reshape(batch_size, max_len, d_model) # b x n x l x d
return v
def _shift(self, BD):
"""
        Converts something like
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
        into
0 1 2
-1 0 1
-2 -1 0
:param BD: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.size()
zero_pad = BD.new_zeros(bsz, n_head, max_len, 1)
BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) # bsz x n_head x (2max_len+1) x max_len
BD = BD[:, :, :-1].view(bsz, n_head, max_len, -1) # bsz x n_head x 2max_len x max_len
BD = BD[:, :, :, max_len:]
return BD
def _transpose_shift(self, E):
"""
类似
-3 -2 -1 0 1 2
-30 -20 -10 00 10 20
-300 -200 -100 000 100 200
转换为
0 -10 -200
1 00 -100
2 10 000
:param E: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = E.size()
zero_pad = E.new_zeros(bsz, n_head, max_len, 1)
# bsz x n_head x -1 x (max_len+1)
E = torch.cat([E, zero_pad], dim=-1).view(bsz, n_head, -1, max_len)
indice = (torch.arange(max_len)*2+1).to(E.device)
E = E.index_select(index=indice, dim=-2).transpose(-1,-2) # bsz x n_head x max_len x max_len
return E
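# --- Illustrative usage sketch (added for clarity; not part of the original fairseq file). ---
# The helper name and sizes are hypothetical; mask uses 1 to mark padding positions, as in forward() above.
def _relative_attn_demo():
    attn = RelativeMultiHeadAttn(d_model=64, n_head=4, dropout=0.1)
    x = torch.randn(2, 10, 64)        # batch_size x max_len x d_model
    mask = torch.zeros(2, 10).long()  # no padding in this toy batch
    out = attn(x, mask)
    # out: (2, 10, 64)
    return out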
| 6,946 | 35.952128 | 118 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quant_noise.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
def quant_noise(module, p, block_size):
"""
Wraps modules and applies quantization noise to the weights for
subsequent quantization with Iterative Product Quantization as
described in "Training with Quantization Noise for Extreme Model Compression"
Args:
- module: nn.Module
- p: amount of Quantization Noise
- block_size: size of the blocks for subsequent quantization with iPQ
Remarks:
- Module weights must have the right sizes wrt the block size
- Only Linear, Embedding and Conv2d modules are supported for the moment
- For more detail on how to quantize by blocks with convolutional weights,
see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
- We implement the simplest form of noise here as stated in the paper
which consists in randomly dropping blocks
"""
# if no quantization noise, don't register hook
if p <= 0:
return module
# supported modules
assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
# test whether module.weight has the right sizes wrt block_size
is_conv = module.weight.ndim == 4
# 2D matrix
if not is_conv:
assert module.weight.size(1) % block_size == 0, "Input features must be a multiple of block sizes"
# 4D matrix
else:
# 1x1 convolutions
if module.kernel_size == (1, 1):
assert module.in_channels % block_size == 0, "Input channels must be a multiple of block sizes"
# regular convolutions
else:
k = module.kernel_size[0] * module.kernel_size[1]
assert k % block_size == 0, "Kernel size must be a multiple of block size"
def _forward_pre_hook(mod, input):
# no noise for evaluation
if mod.training:
if not is_conv:
# gather weight and sizes
weight = mod.weight
in_features = weight.size(1)
out_features = weight.size(0)
# split weight matrix into blocks and randomly drop selected blocks
mask = torch.zeros(in_features // block_size * out_features, device=weight.device)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
else:
# gather weight and sizes
weight = mod.weight
in_channels = mod.in_channels
out_channels = mod.out_channels
# split weight matrix into blocks and randomly drop selected blocks
if mod.kernel_size == (1, 1):
mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
else:
mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)
mask.bernoulli_(p)
mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
# scale weights and apply mask
mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript
s = 1 / (1 - p)
mod.weight.data = s * weight.masked_fill(mask, 0)
module.register_forward_pre_hook(_forward_pre_hook)
return module
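# --- Illustrative usage sketch (added for clarity; not part of the original fairseq file). ---
# Wraps a linear layer with quantization noise; the helper name and numbers are hypothetical but
# respect the requirement that in_features (16) is a multiple of block_size (8).
def _quant_noise_demo():
    layer = quant_noise(nn.Linear(16, 32), p=0.1, block_size=8)
    layer.train()                  # noise is only applied in training mode
    y = layer(torch.randn(4, 16))  # (4, 32); roughly 10% of weight blocks are zeroed and the rest rescaled
    return y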
| 3,666 | 39.296703 | 110 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/gelu.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with
the corresponding GitHub repo: https://github.com/hendrycks/GELUs
"""
import math
import torch
import torch.nn as nn
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
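# --- Illustrative check (added for clarity; not part of the original fairseq file). ---
# The tanh-based approximation in gelu_accurate stays within roughly 1e-3 of the exact GELU;
# the helper name below is hypothetical.
def _gelu_demo():
    x = torch.linspace(-3, 3, steps=7)
    return torch.allclose(gelu(x), gelu_accurate(x), atol=1e-3)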
| 706 | 26.192308 | 87 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/statistics_init.py
|
import numpy as np
import torch
from scipy import io
from .statistics_utils import save_forward_backward_weight_norm
#need to define self.prefix, self.id before initializing statistics
def init_residual_proportion(self, args):
self.record_residual_proportion = args.record_residual_proportion
if self.record_residual_proportion:
self.record_residual_att_file = 'statistics/{}/residual_att_{}'.format(self.prefix,self.id)
self.record_residual_ffn_file = 'statistics/{}/residual_ffn_{}'.format(self.prefix,self.id)
self.x_proportion = {}
self.residual_proportion = {}
self.total_proportion = {}
files = [self.record_residual_att_file, self.record_residual_ffn_file]
for file in files:
self.x_proportion[file] = []
self.residual_proportion[file] = []
self.total_proportion[file] = []
def backward_hook_weight(self, savefile):
def backward_hook_template(grad):
if self.training and self.step%self.save_interval==0:
self.w_grad_norm[savefile].append(grad.norm())
if(self.step%self.save_interval==0 and savefile.find("w1_grad")>=0):
save_forward_backward_weight_norm(self, self.save_weight_file)
return backward_hook_template
def init_forward_backward_weight_norm(self,args):
self.record_weight_norm = args.record_weight_norm
if self.record_weight_norm:
self.record_w1_grad_file = 'statistics/{}/w1_grad_norm_{}'.format(self.prefix,self.id)
self.record_w2_grad_file = 'statistics/{}/w2_grad_norm_{}'.format(self.prefix,self.id)
self.save_weight_file = 'statistics/{}/weight_grad_norm_{}'.format(self.prefix,self.id)
self.w1_norm = []
self.w2_norm = []
self.w_grad_norm = {}
self.w_singular_value = {}
self.w_grad_norm[self.record_w1_grad_file] = []
self.w_grad_norm[self.record_w2_grad_file] = []
self.w_singular_value[self.record_w1_grad_file] = []
self.w_singular_value[self.record_w2_grad_file] = []
self.fc1.weight.register_hook(backward_hook_weight(self, self.record_w1_grad_file))
self.fc2.weight.register_hook(backward_hook_weight(self, self.record_w2_grad_file))
def init_attn_weight(self, args):
self.record_attn_weight = args.record_attn_weight
if self.record_attn_weight:
self.attn_weight_list = []
self.attn_weight_savefile = 'statistics/{}/attn_weight_{}'.format(self.prefix, self.id)
def init_probe_norm(self, args, process):
assert process in ['forward', 'backward']
self.d_savefile[process] = {}
self.d_norm[process]= {}
self.d_norm_distribution[process] = {}
self.d_dominated_word[process] = {}
self.d_dominated_index[process] = {}
self.d_dominated_top2_value[process] = {}
self.d_dominated_r[process] = {}
self.d_zero_rate[process] = {}
#use position to mark everything
for position in self.probe_positions[process]:
savefile = 'statistics/{}/{}_norm_{}_{}'.format(self.prefix, process, position, self.id)
self.d_savefile[process][position] = savefile
self.d_norm[process][position] = []
self.d_norm_distribution[process][position] = []
self.d_dominated_word[process][position]= []
self.d_dominated_index[process][position] = []
self.d_dominated_top2_value[process][position] = []
self.d_dominated_r[process][position] = []
self.d_zero_rate[process][position] = []
def init_probe_condition(self, args, process):
assert process in ['forward', 'backward']
self.d_condition_savefile[process] = {}
self.d_condition_number[process] = {}
self.d_condition_singular_value[process] = {}
#use position to mark everything
for position in self.probe_positions[process]:
savefile = 'statistics/{}/{}_condition_{}_{}'.format(self.prefix, process, position, self.id)
self.d_condition_savefile[process][position] = savefile
self.d_condition_number[process][position] = []
self.d_condition_singular_value[process][position] = []
def init_probe_statistics(self, args, process):
assert process in ['forward', 'backward']
def add_prefix(cur_list, prefix):
cur_list = ["{}_{}".format(prefix, item) for item in cur_list]
return cur_list
#self.candidate_positions[process] = add_prefix(self.all_positions, process)
#self.candidate_parts[process] = add_prefix(self.all_parts, process)
#self.candidate_norm_items[process] = add_prefix(self.all_norm_items, process)
#self.candidate_condition_items[process] = add_prefix(self.all_condition_items, process)
#"norm" includes: input_norm_distribution, dominated word(include input_norm), zero_rate
#
probe_position = args.forward_probe_position if process=='forward' else args.backward_probe_position
record_parts = args.forward_record_parts if process=='forward' else args.backward_record_parts
record_norm_items = args.forward_record_norm_items if process=='forward' else args.backward_record_norm_items
record_condition_items = args.forward_record_condition_items if process=='forward' else args.backward_record_condition_items
if probe_position=='all':
self.probe_positions[process] = self.all_positions
else:
self.probe_positions[process] = (probe_position).split(',') if probe_position!='none' else []
if record_parts=='all':
self.record_parts[process] = self.all_parts
else:
self.record_parts[process] = (record_parts).split(',') if record_parts!='none' else []
if record_norm_items=='all':
self.norm_items[process] = self.all_norm_items
else:
self.norm_items[process] = (record_norm_items).split(',') if record_norm_items!='none' else []
if record_condition_items=='all':
self.condition_items[process] = self.all_condition_items
else:
self.condition_items[process] = (record_condition_items).split(',') if record_condition_items!='none' else []
self.record_process[process] = 0 if probe_position=='none' or record_parts=='none' else 1
if not self.record_process[process]:
return
init_probe_norm(self, args, process)
init_probe_condition(self, args, process)
def init_all_statistics(self,args):
#forward part
init_residual_proportion(self, args)
init_forward_backward_weight_norm(self,args)
init_attn_weight(self, args)
init_probe_statistics(self, args, "forward")
#backward statistics are exactly the same as forward statistics
#backward part
init_probe_statistics(self, args, "backward")
def init_base_dictionary(self,args):
#condition
self.d_condition_savefile = {}
self.d_condition_number = {}
self.d_condition_singular_value = {}
#others
self.d_savefile = {}
self.d_norm = {}
self.d_norm_distribution = {}
self.d_dominated_word = {}
self.d_dominated_index = {}
self.d_dominated_top2_value = {}
self.d_dominated_r = {}
self.d_zero_rate = {}
#
self.probe_positions = {}
self.record_parts = {}
self.norm_items = {}
self.condition_items = {}
#
self.all_positions = ['att_input','att_output','att_norm_input','ffn_input','ffn_output','ffn_norm_input','before_relu','after_relu']
self.all_parts = ['norm','condition']
self.all_norm_items = ['norm_distribution','dominated_word','zero_rate']
self.all_condition_items = ['c_max', 'c_20', 'c_50', 'c_80', 'r_total', 'r', 'intra_average_sim', 'inter_average_sim', 'total_average_sim']
self.candidate_positions = {}
self.candidate_parts = {}
self.candidate_norm_items = {}
self.candidate_condition_items = {}
#
self.record_process = {}
def init_config(self, args):
#initialize configuration for saving statistics
def extract_id(s):
cand = s.split('/')
for c in cand:
if(c.find('transformer')>=0):
return c
print("error path!")
exit()
return 'error'
self.prefix = extract_id(args.save_dir)
self.step = 0
self.save_interval = 1100 #standard: 1100
self.record_interval = 50 #standard: 20
assert self.save_interval%self.record_interval==0, "wrong interval!"
self.max_len = 600
#self.d_statistics = {} #for saving forward & backward statistics
init_base_dictionary(self,args)
init_all_statistics(self,args)
| 8,439 | 43.188482 | 144 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/positional_embedding.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from .learned_positional_embedding import LearnedPositionalEmbedding
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
):
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1,
)
return m
| 1,286 | 38 | 83 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/fairseq_dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class FairseqDropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def forward(self, x, inplace: bool = False):
if self.training or self.apply_during_inference:
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
def make_generation_fast_(
self,
name: str,
retain_dropout: bool = False,
retain_dropout_modules: Optional[List[str]] = None,
**kwargs
):
if retain_dropout:
if retain_dropout_modules is not None and self.module_name is None:
logger.warning(
'Cannot enable dropout during inference for module {} '
'because module_name was not set'.format(name)
)
elif (
retain_dropout_modules is None # if None, apply to all modules
or self.module_name in retain_dropout_modules
):
logger.info(
'Enabling dropout during inference for module: {}'.format(name)
)
self.apply_during_inference = True
else:
logger.info('Disabling dropout for module: {}'.format(name))
| 1,687 | 30.849057 | 83 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/cross_entropy.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def _cross_entropy_pytorch(logits, target, ignore_index=None, reduction='mean'):
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
return F.nll_loss(
lprobs, target, ignore_index=ignore_index, reduction=reduction,
)
try:
import xentropy_cuda
from apex.contrib import xentropy
logger.info('using fused cross entropy')
def cross_entropy(logits, target, ignore_index=-100, reduction='mean'):
if logits.device == torch.device('cpu'):
return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
else:
half_to_float = (logits.dtype == torch.half)
losses = xentropy.SoftmaxCrossEntropyLoss.apply(
logits, target, 0.0, ignore_index, half_to_float,
)
if reduction == 'sum':
return losses.sum()
elif reduction == 'mean':
if ignore_index >= 0:
return losses.sum() / target.ne(ignore_index).sum()
else:
return losses.mean()
elif reduction == 'none':
return losses
else:
raise NotImplementedError
except ImportError:
def cross_entropy(logits, target, ignore_index=-100, reduction='mean'):
return _cross_entropy_pytorch(logits, target, ignore_index, reduction)
| 1,650 | 30.75 | 82 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/adaptive_input.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from fairseq.modules.quant_noise import quant_noise
from typing import List
class AdaptiveInput(nn.Module):
def __init__(
self,
vocab_size: int,
padding_idx: int,
initial_dim: int,
factor: float,
output_dim: int,
cutoff: List[int],
q_noise: float = 0,
qn_block_size: int = 8,
):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
assert vocab_size == cutoff[
-1], 'cannot specify cutoff larger than vocab size'
self.cutoff = cutoff
self.embedding_dim = output_dim
self.padding_idx = padding_idx
self.embeddings = nn.ModuleList()
for i in range(len(self.cutoff)):
prev = self.cutoff[i - 1] if i > 0 else 0
size = self.cutoff[i] - prev
dim = int(initial_dim // (factor ** i))
seq = nn.Sequential(
nn.Embedding(size, dim, self.padding_idx),
quant_noise(nn.Linear(dim, output_dim, bias=False), q_noise, qn_block_size),
)
self.embeddings.append(seq)
self.padding_idx = None
self.padding_idx = padding_idx
def init_weights(m):
if isinstance(m, nn.Embedding):
nn.init.normal_(m.weight, mean=0, std=m.weight.shape[1] ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
elif hasattr(m, 'weight'):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def weights_for_band(self, band: int):
return self.embeddings[band][0].weight, self.embeddings[band][1].weight
def forward(self, input: torch.Tensor):
result = self._float_tensor.new(input.shape + (self.embedding_dim,))
for i in range(len(self.cutoff)):
mask = input.lt(self.cutoff[i])
if i > 0:
mask.mul_(input.ge(self.cutoff[i - 1]))
chunk_input = input[mask] - self.cutoff[i - 1]
else:
chunk_input = input[mask]
if mask.any():
result[mask] = self.embeddings[i](chunk_input)
return result
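# --- Illustrative usage sketch (added for clarity; not part of the original fairseq file). ---
# Hypothetical sizes: token ids below 100 get 64-dim embeddings, ids 100-499 get 32-dim, the rest
# get 16-dim; every band is projected back to output_dim=64. The helper name is hypothetical.
def _adaptive_input_demo():
    emb = AdaptiveInput(vocab_size=1000, padding_idx=1, initial_dim=64,
                        factor=2, output_dim=64, cutoff=[100, 500])
    tokens = torch.randint(0, 1000, (8, 12))  # (batch, seq_len) of token ids
    out = emb(tokens)                         # (8, 12, 64)
    return out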
| 2,514 | 30.835443 | 92 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/gumbel_vector_quantizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class GumbelVectorQuantizer(nn.Module):
def __init__(
self,
dim,
num_vars,
temp,
groups,
combine_groups,
vq_dim,
time_first,
activation=nn.GELU(),
weight_proj_depth=1,
weight_proj_factor=1,
):
"""Vector quantization using gumbel softmax
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1
weight_proj_depth: number of layers (with activation in between) to project input before computing logits
weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of
projections by this factor
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim))
nn.init.uniform_(self.vars)
if weight_proj_depth > 1:
def block(input_dim, output_dim):
return nn.Sequential(nn.Linear(input_dim, output_dim), activation)
inner_dim = self.input_dim * weight_proj_factor
self.weight_proj = nn.Sequential(
*[
block(self.input_dim if i == 0 else inner_dim, inner_dim)
for i in range(weight_proj_depth - 1)
],
nn.Linear(inner_dim, groups * num_vars),
)
else:
self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
nn.init.zeros_(self.weight_proj.bias)
assert len(temp) == 3, temp
self.max_temp, self.min_temp, self.temp_decay = temp
self.curr_temp = self.max_temp
self.codebook_indices = None
def set_num_updates(self, num_updates):
self.curr_temp = max(
self.max_temp * self.temp_decay ** num_updates, self.min_temp
)
def get_codebook_indices(self):
if self.codebook_indices is None:
from itertools import product
p = [range(self.num_vars)] * self.groups
inds = list(product(*p))
self.codebook_indices = torch.tensor(
inds, dtype=torch.long, device=self.vars.device
).flatten()
if not self.combine_groups:
self.codebook_indices = self.codebook_indices.view(
self.num_vars ** self.groups, -1
)
for b in range(1, self.groups):
self.codebook_indices[:, b] += self.num_vars * b
self.codebook_indices = self.codebook_indices.flatten()
return self.codebook_indices
def codebook(self):
indices = self.get_codebook_indices()
return (
self.vars.squeeze(0)
.index_select(0, indices)
.view(self.num_vars ** self.groups, -1)
)
def sample_from_codebook(self, b, n):
indices = self.get_codebook_indices()
indices = indices.view(-1, self.groups)
cb_size = indices.size(0)
assert (
n < cb_size
), f"sample size {n} is greater than size of codebook {cb_size}"
sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))
indices = indices[sample_idx]
z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)
return z
def to_codebook_index(self, indices):
res = indices.new_full(indices.shape[:-1], 0)
for i in range(self.groups):
exponent = self.groups - i - 1
res += indices[..., i] * (self.num_vars ** exponent)
return res
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars * self.groups}
if not self.time_first:
x = x.transpose(1, 2)
bsz, tsz, fsz = x.shape
x = x.reshape(-1, fsz)
x = self.weight_proj(x)
x = x.view(bsz * tsz * self.groups, -1)
_, k = x.max(-1)
hard_x = (
x.new_zeros(*x.shape)
.scatter_(-1, k.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
avg_probs = torch.softmax(
x.view(bsz * tsz, self.groups, -1).float(), dim=-1
).mean(dim=0)
result["prob_perplexity"] = torch.exp(
-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)
).sum()
result["temp"] = self.curr_temp
if self.training:
x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)
else:
x = hard_x
x = x.view(bsz * tsz, -1)
vars = self.vars
if self.combine_groups:
vars = vars.repeat(1, self.groups, 1)
if produce_targets:
result["targets"] = (
x.view(bsz * tsz * self.groups, -1)
.argmax(dim=-1)
.view(bsz, tsz, self.groups)
.detach()
)
x = x.unsqueeze(-1) * vars
x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
x = x.sum(-2)
x = x.view(bsz, tsz, -1)
if not self.time_first:
x = x.transpose(1, 2) # BTC -> BCT
result["x"] = x
return result
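# --- Illustrative usage sketch (added for clarity; not part of the original fairseq file). ---
# Hypothetical configuration: 2 groups of 8 codewords each, BxTxC inputs; the helper name is hypothetical.
def _gumbel_vq_demo():
    vq = GumbelVectorQuantizer(
        dim=32, num_vars=8, temp=(2.0, 0.5, 0.999995),
        groups=2, combine_groups=False, vq_dim=32, time_first=True,
    )
    out = vq(torch.randn(4, 10, 32))
    # out["x"]: (4, 10, 32) quantized features; out["num_vars"] == 16 (8 codewords x 2 groups)
    return out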
| 6,792 | 33.135678 | 117 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/vggblock.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections.abc import Iterable
from itertools import repeat
import torch
import torch.nn as nn
def _pair(v):
if isinstance(v, Iterable):
assert len(v) == 2, "len(v) != 2"
return v
return tuple(repeat(v, 2))
def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
# N x C x H x W
# N: sample_bsz, C: sample_inchannel, H: sample_seq_len, W: input_dim
x = conv_op(x)
# N x C x H x W
x = x.transpose(1, 2)
# N x H x C x W
bsz, seq = x.size()[:2]
per_channel_dim = x.size()[3]
# bsz: N, seq: H, CxW the rest
return x.contiguous().view(bsz, seq, -1).size(-1), per_channel_dim
class VGGBlock(torch.nn.Module):
"""
    VGG-motivated CNN module https://arxiv.org/pdf/1409.1556.pdf
Args:
in_channels: (int) number of input channels (typically 1)
out_channels: (int) number of output channels
conv_kernel_size: convolution channels
pooling_kernel_size: the size of the pooling window to take a max over
num_conv_layers: (int) number of convolution layers
input_dim: (int) input dimension
conv_stride: the stride of the convolving kernel.
Can be a single number or a tuple (sH, sW) Default: 1
padding: implicit paddings on both sides of the input.
Can be a single number or a tuple (padH, padW). Default: None
layer_norm: (bool) if layer norm is going to be applied. Default: False
Shape:
Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)
"""
def __init__(
self,
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim,
conv_stride=1,
padding=None,
layer_norm=False,
):
assert (
input_dim is not None
), "Need input_dim for LayerNorm and infer_conv_output_dim"
super(VGGBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_kernel_size = _pair(conv_kernel_size)
self.pooling_kernel_size = _pair(pooling_kernel_size)
self.num_conv_layers = num_conv_layers
self.padding = (
tuple(e // 2 for e in self.conv_kernel_size)
if padding is None
else _pair(padding)
)
self.conv_stride = _pair(conv_stride)
self.layers = nn.ModuleList()
for layer in range(num_conv_layers):
conv_op = nn.Conv2d(
in_channels if layer == 0 else out_channels,
out_channels,
self.conv_kernel_size,
stride=self.conv_stride,
padding=self.padding,
)
self.layers.append(conv_op)
if layer_norm:
conv_output_dim, per_channel_dim = infer_conv_output_dim(
conv_op, input_dim, in_channels if layer == 0 else out_channels
)
self.layers.append(nn.LayerNorm(per_channel_dim))
input_dim = per_channel_dim
self.layers.append(nn.ReLU())
if self.pooling_kernel_size is not None:
pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
self.layers.append(pool_op)
self.total_output_dim, self.output_dim = infer_conv_output_dim(
pool_op, input_dim, out_channels
)
def forward(self, x):
for i, _ in enumerate(self.layers):
x = self.layers[i](x)
return x
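# --- Illustrative usage sketch (added for clarity; not part of the original fairseq file). ---
# Hypothetical audio-style input: batch of 8, 1 input channel, 100 timesteps, 40 features;
# the helper name is hypothetical.
def _vggblock_demo():
    block = VGGBlock(in_channels=1, out_channels=32, conv_kernel_size=3,
                     pooling_kernel_size=2, num_conv_layers=2, input_dim=40)
    x = torch.randn(8, 1, 100, 40)  # B x C x T x feat, as in the class docstring
    y = block(x)                    # (8, 32, 50, 20) after the 2x2 max pooling
    return y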
| 4,057 | 33.683761 | 88 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/character_token_embedder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Tuple
import torch
from torch import nn
import torch.nn.functional as F
from fairseq.data import Dictionary
CHAR_PAD_IDX = 0
CHAR_EOS_IDX = 257
logger = logging.getLogger(__name__)
class CharacterTokenEmbedder(torch.nn.Module):
def __init__(
self,
vocab: Dictionary,
filters: List[Tuple[int, int]],
char_embed_dim: int,
word_embed_dim: int,
highway_layers: int,
max_char_len: int = 50,
char_inputs: bool = False
):
super(CharacterTokenEmbedder, self).__init__()
self.onnx_trace = False
self.embedding_dim = word_embed_dim
self.max_char_len = max_char_len
self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0)
self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim))
self.eos_idx, self.unk_idx = 0, 1
self.char_inputs = char_inputs
self.convolutions = nn.ModuleList()
for width, out_c in filters:
self.convolutions.append(
nn.Conv1d(char_embed_dim, out_c, kernel_size=width)
)
last_dim = sum(f[1] for f in filters)
self.highway = Highway(last_dim, highway_layers) if highway_layers > 0 else None
self.projection = nn.Linear(last_dim, word_embed_dim)
assert vocab is not None or char_inputs, "vocab must be set if not using char inputs"
self.vocab = None
if vocab is not None:
self.set_vocab(vocab, max_char_len)
self.reset_parameters()
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def set_vocab(self, vocab, max_char_len):
word_to_char = torch.LongTensor(len(vocab), max_char_len)
truncated = 0
for i in range(len(vocab)):
if i < vocab.nspecial:
char_idxs = [0] * max_char_len
else:
chars = vocab[i].encode()
# +1 for padding
char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars))
if len(char_idxs) > max_char_len:
truncated += 1
char_idxs = char_idxs[:max_char_len]
word_to_char[i] = torch.LongTensor(char_idxs)
if truncated > 0:
logger.info('truncated {} words longer than {} characters'.format(truncated, max_char_len))
self.vocab = vocab
self.word_to_char = word_to_char
@property
def padding_idx(self):
return Dictionary().pad() if self.vocab is None else self.vocab.pad()
def reset_parameters(self):
nn.init.xavier_normal_(self.char_embeddings.weight)
nn.init.xavier_normal_(self.symbol_embeddings)
nn.init.xavier_uniform_(self.projection.weight)
nn.init.constant_(self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.)
nn.init.constant_(self.projection.bias, 0.)
def forward(
self,
input: torch.Tensor,
):
if self.char_inputs:
chars = input.view(-1, self.max_char_len)
pads = chars[:, 0].eq(CHAR_PAD_IDX)
eos = chars[:, 0].eq(CHAR_EOS_IDX)
if eos.any():
if self.onnx_trace:
chars = torch.where(eos.unsqueeze(1), chars.new_zeros(1), chars)
else:
chars[eos] = 0
unk = None
else:
flat_words = input.view(-1)
chars = self.word_to_char[flat_words.type_as(self.word_to_char)].type_as(input)
pads = flat_words.eq(self.vocab.pad())
eos = flat_words.eq(self.vocab.eos())
unk = flat_words.eq(self.vocab.unk())
word_embs = self._convolve(chars)
if self.onnx_trace:
if pads.any():
word_embs = torch.where(pads.unsqueeze(1), word_embs.new_zeros(1), word_embs)
if eos.any():
word_embs = torch.where(eos.unsqueeze(1), self.symbol_embeddings[self.eos_idx], word_embs)
if unk is not None and unk.any():
word_embs = torch.where(unk.unsqueeze(1), self.symbol_embeddings[self.unk_idx], word_embs)
else:
if pads.any():
word_embs[pads] = 0
if eos.any():
word_embs[eos] = self.symbol_embeddings[self.eos_idx]
if unk is not None and unk.any():
word_embs[unk] = self.symbol_embeddings[self.unk_idx]
return word_embs.view(input.size()[:2] + (-1,))
def _convolve(
self,
char_idxs: torch.Tensor,
):
char_embs = self.char_embeddings(char_idxs)
char_embs = char_embs.transpose(1, 2) # BTC -> BCT
conv_result = []
for conv in self.convolutions:
x = conv(char_embs)
x, _ = torch.max(x, -1)
x = F.relu(x)
conv_result.append(x)
x = torch.cat(conv_result, dim=-1)
if self.highway is not None:
x = self.highway(x)
x = self.projection(x)
return x
class Highway(torch.nn.Module):
"""
A `Highway layer <https://arxiv.org/abs/1505.00387>`_.
    Adapted from the AllenNLP implementation.
"""
def __init__(
self,
input_dim: int,
num_layers: int = 1
):
super(Highway, self).__init__()
self.input_dim = input_dim
self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2)
for _ in range(num_layers)])
self.activation = nn.ReLU()
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
# As per comment in AllenNLP:
# We should bias the highway layer to just carry its input forward. We do that by
# setting the bias on `B(x)` to be positive, because that means `g` will be biased to
# be high, so we will carry the input forward. The bias on `B(x)` is the second half
# of the bias vector in each Linear layer.
nn.init.constant_(layer.bias[self.input_dim:], 1)
nn.init.constant_(layer.bias[:self.input_dim], 0)
nn.init.xavier_normal_(layer.weight)
def forward(
self,
x: torch.Tensor
):
for layer in self.layers:
projection = layer(x)
proj_x, gate = projection.chunk(2, dim=-1)
proj_x = self.activation(proj_x)
gate = torch.sigmoid(gate)
x = gate * x + (gate.new_tensor([1]) - gate) * proj_x
return x
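# Hedged usage sketch of the Highway block above; the sizes are illustrative
# assumptions rather than values from any released configuration. Each layer
# gates between its input and a ReLU projection, so the feature size is kept.
if __name__ == "__main__":
    highway = Highway(input_dim=16, num_layers=2)
    feats = torch.randn(4, 16)         # (batch, input_dim)
    out = highway(feats)
    assert out.shape == feats.shape    # highway layers preserve dimensionality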
| 6,846 | 32.4 | 106 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/unfold.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn.functional as F
def unfold1d(x, kernel_size, padding_l, pad_value=0):
'''unfold T x B x C to T x B x C x K'''
if kernel_size > 1:
T, B, C = x.size()
x = F.pad(x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value)
x = x.as_strided((T, B, C, kernel_size), (B*C, C, 1, B*C))
else:
x = x.unsqueeze(3)
return x
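# Hedged usage sketch (sizes are arbitrary assumptions): unfold1d turns a
# T x B x C sequence into T x B x C x K sliding windows, where entry
# [t, :, :, k] holds the frame at time t - padding_l + k.
if __name__ == "__main__":
    import torch
    x = torch.arange(5 * 2 * 3, dtype=torch.float).view(5, 2, 3)   # T x B x C
    windows = unfold1d(x, kernel_size=3, padding_l=1)              # T x B x C x K
    assert windows.shape == (5, 2, 3, 3)
    assert torch.equal(windows[2, :, :, 1], x[2])                  # center tap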
| 570 | 30.722222 | 91 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/fp32_group_norm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Group norm done in fp32 (for fp16 training)
"""
import torch.nn as nn
import torch.nn.functional as F
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
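# Hedged usage sketch (shapes are arbitrary assumptions): statistics are
# computed in fp32 and the result is cast back to the input dtype, which is
# what makes this variant safe to use under fp16 training.
if __name__ == "__main__":
    import torch
    gn = Fp32GroupNorm(num_groups=4, num_channels=16)
    x = torch.randn(2, 16, 50)          # (batch, channels, time)
    y = gn(x)
    assert y.shape == x.shape and y.dtype == x.dtype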
| 727 | 27 | 69 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/adaptive_softmax.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import operator
import functools
import torch
import torch.nn.functional as F
from fairseq.modules.quant_noise import quant_noise
from fairseq.modules.fairseq_dropout import FairseqDropout
from torch import nn
class TiedLinear(nn.Module):
def __init__(self, weight, transpose):
super().__init__()
self.weight = weight
self.transpose = transpose
def forward(self, input):
return F.linear(input, self.weight.t() if self.transpose else self.weight)
class TiedHeadModule(nn.Module):
def __init__(self, weights, input_dim, num_classes, q_noise, qn_block_size):
super().__init__()
tied_emb, _ = weights
self.num_words, emb_dim = tied_emb.size()
self.word_proj = quant_noise(TiedLinear(tied_emb, transpose=False), q_noise, qn_block_size)
if input_dim != emb_dim:
self.word_proj = nn.Sequential(
quant_noise(nn.Linear(input_dim, emb_dim, bias=False), q_noise, qn_block_size),
self.word_proj,
)
self.class_proj = quant_noise(nn.Linear(input_dim, num_classes, bias=False), q_noise, qn_block_size)
self.out_dim = self.num_words + num_classes
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def forward(self, input):
inp_sz = functools.reduce(operator.mul, input.shape[:-1], 1)
out = self._float_tensor.new(inp_sz, self.out_dim)
out[:, :self.num_words] = self.word_proj(input.view(inp_sz, -1))
out[:, self.num_words:] = self.class_proj(input.view(inp_sz, -1))
return out
class AdaptiveSoftmax(nn.Module):
"""
This is an implementation of the efficient softmax approximation for
    graphics processing units (GPUs), described in the paper "Efficient softmax
approximation for GPUs" (http://arxiv.org/abs/1609.04309).
"""
def __init__(self, vocab_size, input_dim, cutoff, dropout, factor=4., adaptive_inputs=None, tie_proj=False,
q_noise=0, qn_block_size=8):
super().__init__()
if vocab_size > cutoff[-1]:
cutoff = cutoff + [vocab_size]
else:
            assert vocab_size == cutoff[-1], 'cannot specify cutoff larger than vocab size'
output_dim = cutoff[0] + len(cutoff) - 1
self.vocab_size = vocab_size
self.cutoff = cutoff
self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
self.input_dim = input_dim
self.factor = factor
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.lsm = nn.LogSoftmax(dim=1)
if adaptive_inputs is not None:
self.head = TiedHeadModule(adaptive_inputs.weights_for_band(0), input_dim, len(cutoff) - 1, self.q_noise, self.qn_block_size)
else:
self.head = quant_noise(nn.Linear(input_dim, output_dim, bias=False), self.q_noise, self.qn_block_size)
self._make_tail(adaptive_inputs, tie_proj)
def init_weights(m):
if hasattr(m, 'weight') and not isinstance(m, TiedLinear) and not isinstance(m, TiedHeadModule):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer('version', torch.LongTensor([1]))
def _make_tail(self, adaptive_inputs=None, tie_proj=False):
self.tail = nn.ModuleList()
for i in range(len(self.cutoff) - 1):
dim = int(self.input_dim // self.factor ** (i + 1))
tied_emb, tied_proj = adaptive_inputs.weights_for_band(i + 1) \
if adaptive_inputs is not None else (None, None)
if tied_proj is not None:
if tie_proj:
proj = quant_noise(TiedLinear(tied_proj, transpose=True), self.q_noise, self.qn_block_size)
else:
proj = quant_noise(nn.Linear(tied_proj.size(0), tied_proj.size(1), bias=False), self.q_noise, self.qn_block_size)
else:
proj = quant_noise(nn.Linear(self.input_dim, dim, bias=False), self.q_noise, self.qn_block_size)
if tied_emb is None:
out_proj = nn.Linear(dim, self.cutoff[i + 1] - self.cutoff[i], bias=False)
else:
out_proj = TiedLinear(tied_emb, transpose=False)
m = nn.Sequential(
proj,
nn.Dropout(self.dropout_module.p),
quant_noise(out_proj, self.q_noise, self.qn_block_size),
)
self.tail.append(m)
def upgrade_state_dict_named(self, state_dict, name):
version_name = name + '.version'
if version_name not in state_dict:
raise Exception('This version of the model is no longer supported')
def adapt_target(self, target):
"""
In order to be efficient, the AdaptiveSoftMax does not compute the
        scores for all the words of the vocabulary for all the examples. It is
thus necessary to call the method adapt_target of the AdaptiveSoftMax
layer inside each forward pass.
"""
target = target.view(-1)
new_target = [target.clone()]
target_idxs = []
for i in range(len(self.cutoff) - 1):
mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
new_target[0][mask] = self.cutoff[0] + i
if mask.any():
target_idxs.append(mask.nonzero(as_tuple=False).squeeze(1))
new_target.append(target[mask].add(-self.cutoff[i]))
else:
target_idxs.append(None)
new_target.append(None)
return new_target, target_idxs
def forward(self, input, target):
"""
Args:
input: (b x t x d)
target: (b x t)
Returns:
            2 lists: output for each cutoff section and new targets by cutoff
"""
input = input.contiguous().view(-1, input.size(-1))
input = self.dropout_module(input)
new_target, target_idxs = self.adapt_target(target)
output = [self.head(input)]
for i in range(len(target_idxs)):
if target_idxs[i] is not None:
output.append(self.tail[i](input.index_select(0, target_idxs[i])))
else:
output.append(None)
return output, new_target
def get_log_prob(self, input, target):
"""
Computes the log probabilities for all the words of the vocabulary,
given a 2D tensor of hidden vectors.
"""
bsz, length, dim = input.size()
input = input.contiguous().view(-1, dim)
if target is not None:
_, target_idxs = self.adapt_target(target)
else:
target_idxs = None
head_y = self.head(input)
log_probs = head_y.new_zeros(input.size(0), self.vocab_size)
head_sz = self.cutoff[0] + len(self.tail)
log_probs[:, :head_sz] = self.lsm(head_y)
tail_priors = log_probs[:, self.cutoff[0]: head_sz].clone()
for i in range(len(self.tail)):
start = self.cutoff[i]
end = self.cutoff[i + 1]
if target_idxs is None:
tail_out = log_probs[:, start:end]
tail_out.copy_(self.tail[i](input))
log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])
elif target_idxs[i] is not None:
idxs = target_idxs[i]
tail_out = log_probs[idxs, start:end]
tail_out.copy_(self.tail[i](input[idxs]))
log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])
log_probs = log_probs.view(bsz, length, -1)
return log_probs
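# Hedged usage sketch; every size below is an illustrative assumption. The head
# scores the cutoff[0] most frequent words plus one logit per tail cluster,
# while each tail only scores the examples whose targets fall in that cluster.
if __name__ == "__main__":
    vocab_size, input_dim = 1000, 32
    asm = AdaptiveSoftmax(vocab_size, input_dim, cutoff=[100, 500], dropout=0.0)
    hidden = torch.randn(2, 7, input_dim)           # (batch, time, dim)
    target = torch.randint(0, vocab_size, (2, 7))   # (batch, time)
    outputs, new_targets = asm(hidden, target)      # per-cluster logits/targets
    log_probs = asm.get_log_prob(hidden, target)    # (batch, time, vocab_size)
    assert log_probs.shape == (2, 7, vocab_size)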
| 7,945 | 35.95814 | 137 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/fc_select.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from .fc.wn import CWN
from .fc.conv import Conv1d
from .fc.dropout_fc import DropoutFC
from .fc.oni_fc import ONI_Linear
def parse_fc(fc_type):
args = fc_type.split("_")
return args
def FcSelect(input_dim, output_dim, fc_type, layer_id=-1):
args = parse_fc(fc_type)
fc_type = args[0]
#print(args);exit()
if fc_type == "nnlinear":
return nn.Linear(input_dim, output_dim)
elif fc_type == "linear":
if len(args)==1:
return Linear(input_dim, output_dim)
if len(args)==2:
return Linear(input_dim, output_dim, scale=float(args[1]))
if len(args)==3:
return Linear(input_dim, output_dim, scale=float(args[1]), zero_bias=int(args[2]))
else:
print("error len linear!")
elif fc_type == "dpfc":
if args[-1]=='select':
if args[-2].find(str(layer_id))>=0:
return DropoutFC(input_dim, output_dim, dropout=float(args[1]),scale=float(args[2]))
else:
return DropoutFC(input_dim, output_dim, dropout=0.0,scale=float(args[2]))
        # expected formats: "dpfc_<dropout>" or "dpfc_<dropout>_<scale>"
        if len(args)==2:
            return DropoutFC(input_dim, output_dim, dropout=float(args[1]))
        if len(args)==3:
            return DropoutFC(input_dim, output_dim, dropout=float(args[1]), scale=float(args[2]))
        else:
            print("error len dpfc!")
elif fc_type == "onifc":
if args[-1]=='select':
if args[-2].find(str(layer_id))>=0:
return ONI_Linear(input_dim, output_dim)
else:
return Linear(input_dim, output_dim, scale=float(args[1]))
        # expected format: "onifc_<scale>"
        if len(args)==2:
            return Linear(input_dim, output_dim, scale=float(args[1]))
        else:
            print("error len onifc!")
elif fc_type == "conv1d":
return Conv1d(input_dim,output_dim,kernel_size=int(args[1]))
elif fc_type == "wn":
return CWN(input_dim, output_dim, iscenter=int(args[1]), Nscale=float(args[2]), adjustScale=int(args[3]))
else:
print("error FcSelect!")
exit()
#elif norm_type == 'power':
# return MaskPowerNorm(embed_dim, group_num=head_num, warmup_iters=warmup_updates)
def Linear(in_features, out_features, scale=1.0, zero_bias=True):
m = nn.Linear(in_features, out_features, bias=True)
nn.init.xavier_uniform_(m.weight,gain=scale)
if zero_bias:
nn.init.constant_(m.bias, 0.0)
return m
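# Hedged usage sketch: fc_type is an underscore-separated spec whose first
# token picks the projection variant (the concrete strings and sizes below are
# illustrative assumptions, not configurations from the accompanying paper).
if __name__ == "__main__":
    import torch
    fc_plain = FcSelect(8, 16, "nnlinear")       # plain nn.Linear
    fc_scaled = FcSelect(8, 16, "linear_0.5")    # xavier init with gain 0.5
    x = torch.randn(4, 8)
    assert fc_plain(x).shape == (4, 16)
    assert fc_scaled(x).shape == (4, 16)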
| 2,654 | 38.044118 | 113 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/dropout_select.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from .noise_dropout import NoiseDropout
def parse_dropout(dropout_type):
args = dropout_type.split("_")
return args
def DropoutSelect(dropout_type):
args = parse_dropout(dropout_type)
dropout_type = args[0]
if dropout_type == "hard":
        return nn.Dropout(p=float(args[1]))
    elif dropout_type == "noise":
        return NoiseDropout(alpha=float(args[1]))
else:
print("error DropoutSelect!")
exit()
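# Hedged usage sketch (the rate below is an arbitrary assumption): the type
# string selects between standard dropout ("hard_<p>") and the NoiseDropout
# variant defined in noise_dropout.py ("noise_<alpha>").
if __name__ == "__main__":
    import torch
    drop = DropoutSelect("hard_0.1")    # -> nn.Dropout(p=0.1)
    x = torch.randn(4, 8)
    assert drop(x).shape == x.shape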
| 642 | 25.791667 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/sparse_transformer_sentence_encoder_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.modules import TransformerSentenceEncoderLayer
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
"""
    Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)
"""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = 'relu',
export: bool = False,
is_bidirectional: bool = True,
stride: int = 32,
expressivity: int = 8,
) -> None:
super().__init__(
embedding_dim, ffn_embedding_dim, num_attention_heads, dropout,
attention_dropout, activation_dropout, activation_fn, export
)
self.self_attn = SparseMultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
add_bias_kv=False,
add_zero_attn=False,
self_attention=True,
is_bidirectional=is_bidirectional,
stride=stride,
expressivity=expressivity,
)
| 1,490 | 31.413043 | 80 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/conv_tbc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.nn.modules.utils import _single
class ConvTBC(torch.nn.Module):
"""1D convolution over an input of shape (time x batch x channel)
The implementation uses gemm to perform the convolution. This implementation
is faster than cuDNN for small kernel sizes.
"""
def __init__(self, in_channels, out_channels, kernel_size, padding=0):
super(ConvTBC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _single(kernel_size)
self.padding = _single(padding)
self.weight = torch.nn.Parameter(torch.Tensor(
self.kernel_size[0], in_channels, out_channels))
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
def forward(self, input):
return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0])
def __repr__(self):
s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
', padding={padding}')
if self.bias is None:
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
| 1,356 | 35.675676 | 90 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/transformer_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch.serialization import save
from fairseq import utils
from fairseq.modules import LayerNorm
from fairseq.modules.norm.mask_identity import MaskIdentityNorm as InFcNorm
from fairseq.modules.norm.mask_identity import MaskIdentityNorm as NoNorm
#from fairseq.modules import CWN
from fairseq.modules import NormSelect
from fairseq.modules import FcSelect
from fairseq.modules import ActivationSelect
from fairseq.modules import MultiheadAttention, MultiheadAttentionSimple
from fairseq.modules.quant_noise import quant_noise
from fairseq.modules.fairseq_dropout import FairseqDropout
from torch import Tensor
import numpy as np
from scipy import io
import time
from .statistics_utils import insert_probe
from .statistics_utils import record_forward_weight_norm
from .statistics_utils import save_attn_weights
from .statistics_utils import save_residual_proportion
from .statistics_init import init_config
cond_name = ['c_max', 'c_20', 'c_50', 'c_80', 'r_total', 'r', 'intra_average_sim', 'inter_average_sim', 'total_average_sim']
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
__count = 0
def update_cnt(self):
TransformerEncoderLayer.__count += 1
def forward_hook(self,savefile):
def forward_hook_template(m,i,o):
input = i[0]
self.input_norm[savefile].append(torch.norm(input,dim=(0,2)))
return forward_hook_template
def __init__(self, args):
super().__init__()
        self.id = self.__count  # assign a unique id to each encoder layer instance
self.update_cnt()
self.orth_penalty = args.orth_penalty
self.embed_dim = args.encoder_embed_dim
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.self_attn = self.build_self_attention(self.embed_dim, args)
#self.self_attn_layer_norm = NormSelect(args.encoder_att_norm,self.embed_dim, args.wseq, prefix = self.prefix)
self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
#self.dropout_module = FairseqDropout(args.encoder_dropout, module_name=self.__class__.__name__)
#self.after_norm_dropout_module = nn.Dropout(p=args.encoder_after_norm_dropout)
#self.activation_fn = utils.get_activation_fn(
# activation=getattr(args, "activation_fn", "relu")
#)
self.activation_fn = ActivationSelect(args.encoder_activation)
activation_dropout_p = getattr(args, "activation_dropout", 0)
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0)
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.encoder_normalize_before
#self.fc1 = nn.Linear(self.embed_dim, args.encoder_ffn_embed_dim)
#self.fc2 = nn.Linear(args.encoder_ffn_embed_dim,self.embed_dim)
self.fc1 = FcSelect(self.embed_dim, args.encoder_ffn_embed_dim, args.encoder_fc1, self.id)
self.fc2 = FcSelect(args.encoder_ffn_embed_dim, self.embed_dim, args.encoder_fc2, self.id)
init_config(self, args)
self.self_attn_layer_norm = NormSelect(args.encoder_att_norm, self.embed_dim, self.id, self.prefix)
self.in_ffn_norm = NormSelect(args.encoder_in_ffn_norm, args.encoder_ffn_embed_dim, self.id, self.prefix)
#self.before_relu_nonorm = NormSelect("batch_nonorm", args.encoder_ffn_embed_dim, self.id, self.prefix)
#self.after_relu_nonorm = NormSelect("batch_nonorm", args.encoder_ffn_embed_dim, self.id, self.prefix)
self.final_layer_norm = NormSelect(args.encoder_ffn_norm, self.embed_dim, self.id, self.prefix)
        #m.register_parameter('weight_g',nn.parameter.Parameter(torch.ones(3,1)))  # the scale can be changed this way
if self.id == -1:
self.fc1.weight_v.register_hook(self.hook_v)
self.fc1.weight_g.register_hook(self.hook_v)
def orth_loss(self, w):
d1,d2 = w.shape
I = torch.eye(min(d1,d2)).cuda()
if d1>d2:
cov = torch.matmul(w.transpose(0,1),w)
else:
cov = torch.matmul(w,w.transpose(0,1))
cur_loss = ((cov-2*I)**2).sum()
#print("orth loss:",cur_loss)
return cur_loss
def loss(self):
assert self.training, "wrongly adding orthorgonal penalty!"
loss = 0
if self.orth_penalty>0:
loss += self.orth_penalty*self.orth_loss(self.fc1.weight)
loss += self.orth_penalty*self.orth_loss(self.fc2.weight)
return loss
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)
def build_self_attention(self, embed_dim, args):
if args.encoder_attention=='origin':
return MultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
normalize_q=args.normalize_q,
normalize_k=args.normalize_k,
normalize_v=args.normalize_v,
g0=args.g0,
fix_g0=args.fix_g0,
)
elif args.encoder_attention=='position':
return MultiheadAttentionSimple(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
normalize_q=args.normalize_q,
normalize_k=args.normalize_k,
normalize_v=args.normalize_v,
g0=args.g0,
fix_g0=args.fix_g0,
)
else:
print("error encoder attention!");exit()
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, src_tokens=None):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
        #if(encoder_padding_mask.sum()!=0 and 0): # sometimes the first few entries are True and the rest are False
# print("has mask!",self.id)
# print(encoder_padding_mask.sum())
# print(encoder_padding_mask)
#exit()
        #if(attn_mask is not None and 0): # always None here
# print("has att mask!",self.id)
# print(attn_mask);exit()
if self.training:
self.step += 1
self.src_tokens = src_tokens
self.mask = 1 - encoder_padding_mask.unsqueeze(dim=-1).type_as(x)
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
#record the weight matrix in FFN module
record_forward_weight_norm(self)
residual = x
if self.normalize_before:
position = 'att_norm_input'
insert_probe(self, x, position)
x = self.self_attn_layer_norm(x,encoder_padding_mask)
#x = self.after_norm_dropout_module(x)
#Attention Layer
position = 'att_input'
insert_probe(self, x, position)
x, attn_weights = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
)
save_attn_weights(self, attn_weights)
position = 'att_output'
insert_probe(self, x, position)
        x = self.dropout_module(x)  # apply the sub-module dropout first, then add the residual
save_residual_proportion(self, x, residual, module='att')
x = residual + x
if not self.normalize_before:
position = 'att_norm_input'
insert_probe(self, x, position)
x = self.self_attn_layer_norm(x, encoder_padding_mask)
#x = self.after_norm_dropout_module(x)
#FFN
residual = x
if self.normalize_before:
position = 'ffn_norm_input'
insert_probe(self, x, position)
x = self.final_layer_norm(x,encoder_padding_mask)
#x = self.after_norm_dropout_module(x)
position = 'ffn_input'
insert_probe(self, x, position)
x = self.fc1(x)
position = 'before_relu'
insert_probe(self, x, position)
x = self.in_ffn_norm(x, encoder_padding_mask)
#x = self.before_relu_nonorm(x, encoder_padding_mask)
x = self.activation_fn(x)
#x = self.after_relu_nonorm(x, encoder_padding_mask)
position = 'after_relu'
insert_probe(self, x, position)
x = self.activation_dropout_module(x)
x = self.fc2(x)
position = 'ffn_output'
insert_probe(self, x, position)
x = self.dropout_module(x)
save_residual_proportion(self, x, residual, module='ffn')
x = residual + x
if not self.normalize_before:
position = 'ffn_norm_input'
insert_probe(self, x, position)
x = self.final_layer_norm(x, encoder_padding_mask)
#x = self.after_norm_dropout_module(x)
return x
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
__count = 0
def update_cnt(self):
TransformerDecoderLayer.__count += 1
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
        self.id = self.__count  # assign a unique id to each decoder layer instance
self.update_cnt()
self.embed_dim = args.decoder_embed_dim
self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
#self.dropout_module = FairseqDropout(args.decoder_dropout, module_name=self.__class__.__name__)
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
activation_dropout_p = getattr(args, "activation_dropout", 0)
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0)
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
#self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
#self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.self_attn_layer_norm = NormSelect(args.decoder_att_norm ,self.embed_dim)
#self.self_attn_layer_norm = NoNorm(self.embed_dim)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
#self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
#self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
#self.encoder_attn_layer_norm = NoNorm(self.embed_dim)
self.encoder_attn_layer_norm = NormSelect(args.decoder_cross_att_norm ,self.embed_dim)
self.fc1 = FcSelect(self.embed_dim, args.decoder_ffn_embed_dim, args.decoder_fc1)
self.in_ffn_norm = NormSelect(args.decoder_in_ffn_norm, args.decoder_ffn_embed_dim)
self.fc2 = FcSelect(args.decoder_ffn_embed_dim, self.embed_dim, args.decoder_fc2)
#self.fc1 = self.build_fc1(
# self.embed_dim, args.decoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size
#)
#self.fc2 = self.build_fc2(
# args.decoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size
#)
#self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
#self.final_layer_norm = LayerNorm(self.embed_dim)
#self.final_layer_norm = NoNorm(self.embed_dim)
self.final_layer_norm = NormSelect(args.decoder_ffn_norm ,self.embed_dim)
self.need_attn = True
self.onnx_trace = False
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
#if self.id==0:
# print("decoding!")
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
#print(self_attn_padding_mask)
            #assert self_attn_padding_mask is not None, "wrong attn padding mask!" # why does this come out as None during training?
x = self.self_attn_layer_norm(x, self_attn_padding_mask)
if prev_self_attn_state is not None: #false
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
): #false
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x, self_attn_padding_mask)
if self.encoder_attn is not None: #true
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x, self_attn_padding_mask)
if prev_attn_state is not None: #false
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x, self_attn_padding_mask)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x, self_attn_padding_mask)
x = self.fc1(x)
x = self.in_ffn_norm(x,self_attn_padding_mask) #add decoder padding mask?
x = self.activation_fn(x)
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x, self_attn_padding_mask)
if self.onnx_trace and incremental_state is not None: #false
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
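# Hedged schematic of the two residual orderings described in the class
# docstrings above (post-norm vs. pre-norm); `sublayer`, `norm` and `dropout`
# are illustrative stand-ins for the attention/FFN blocks, not the class's
# actual code path (whose norms also take a padding mask).
def _post_norm_step(x, sublayer, norm, dropout):
    # paper default: dropout -> add residual -> normalize
    return norm(x + dropout(sublayer(x)))
def _pre_norm_step(x, sublayer, norm, dropout):
    # tensor2tensor variant: normalize -> sublayer -> dropout -> add residual
    return x + dropout(sublayer(norm(x)))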
| 24,120 | 40.162116 | 125 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/beamable_mm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class BeamableMM(nn.Module):
"""This module provides an optimized MM for beam decoding with attention.
    It leverages the fact that the source-side of the input is replicated beam
times and the target-side of the input is of width one. This layer speeds up
inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)}
with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
"""
def __init__(self, beam_size=None):
super(BeamableMM, self).__init__()
self.beam_size = beam_size
def forward(self, input1, input2):
if (
not self.training and # test mode
self.beam_size is not None and # beam size is set
input1.dim() == 3 and # only support batched input
input1.size(1) == 1 # single time step update
):
bsz, beam = input1.size(0), self.beam_size
# bsz x 1 x nhu --> bsz/beam x beam x nhu
input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
# bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
input2 = input2.unfold(0, beam, beam)[:, :, :, 0]
# use non batched operation if bsz = beam
if input1.size(0) == 1:
output = torch.mm(input1[0, :, :], input2[0, :, :])
else:
output = input1.bmm(input2)
return output.view(bsz, 1, -1)
else:
return input1.bmm(input2)
def set_beam_size(self, beam_size):
self.beam_size = beam_size
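# Hedged sketch (beam and batch sizes are arbitrary assumptions): when the
# source side is replicated beam times, the beamable path gives the same result
# as a plain bmm while operating on bsz/beam-sized batches.
if __name__ == "__main__":
    beam, sentences, nhu, src_len = 5, 2, 8, 7
    bmm = BeamableMM(beam_size=beam).eval()        # optimization is eval-only
    a = torch.randn(sentences * beam, 1, nhu)      # one decoding step per hypothesis
    b = torch.randn(sentences, nhu, src_len).repeat_interleave(beam, dim=0)
    out = bmm(a, b)
    assert out.shape == (sentences * beam, 1, src_len)
    assert torch.allclose(out, a.bmm(b), atol=1e-6)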
| 1,779 | 36.083333 | 80 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .adaptive_input import AdaptiveInput
from .adaptive_softmax import AdaptiveSoftmax
from .beamable_mm import BeamableMM
from .character_token_embedder import CharacterTokenEmbedder
from .conv_tbc import ConvTBC
from .cross_entropy import cross_entropy
from .downsampled_multihead_attention import DownsampledMultiHeadAttention
from .dynamic_convolution import DynamicConv, DynamicConv1dTBC
from .dynamic_crf_layer import DynamicCRF
from .fairseq_dropout import FairseqDropout
from .fp32_group_norm import Fp32GroupNorm
from .gelu import gelu, gelu_accurate
from .grad_multiply import GradMultiply
from .gumbel_vector_quantizer import GumbelVectorQuantizer
from .kmeans_vector_quantizer import KmeansVectorQuantizer
from .layer_drop import LayerDropModuleList
from .layer_norm import Fp32LayerNorm, LayerNorm
#from .wn import CWN, MultiHeadCWN
from .norm_select import NormSelect
from .fc_select import FcSelect
from .activation_select import ActivationSelect
from .learned_positional_embedding import LearnedPositionalEmbedding
from .lightweight_convolution import LightweightConv, LightweightConv1dTBC
from .linearized_convolution import LinearizedConvolution
from .multihead_attention import MultiheadAttention
from .multihead_attention_simple import MultiheadAttentionSimple
from .positional_embedding import PositionalEmbedding
from .same_pad import SamePad
from .scalar_bias import ScalarBias
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer
from .transformer_sentence_encoder import TransformerSentenceEncoder
from .transpose_last import TransposeLast
from .unfold import unfold1d
from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer
from .vggblock import VGGBlock
#print(MaskPowerNorm)
__all__ = [
'AdaptiveInput',
'AdaptiveSoftmax',
'BeamableMM',
'CharacterTokenEmbedder',
'ConvTBC',
'cross_entropy',
'DownsampledMultiHeadAttention',
'DynamicConv1dTBC',
'DynamicConv',
'DynamicCRF',
'FairseqDropout',
'Fp32GroupNorm',
'Fp32LayerNorm',
'gelu',
'gelu_accurate',
'GradMultiply',
'GumbelVectorQuantizer',
'KmeansVectorQuantizer',
'LayerDropModuleList',
'LayerNorm',
'LearnedPositionalEmbedding',
'LightweightConv1dTBC',
'LightweightConv',
'LinearizedConvolution',
'MultiheadAttention',
'PositionalEmbedding',
'SamePad',
'ScalarBias',
'SinusoidalPositionalEmbedding',
'TransformerSentenceEncoderLayer',
'TransformerSentenceEncoder',
'TransformerDecoderLayer',
'TransformerEncoderLayer',
'TransposeLast',
'VGGBlock',
'unfold1d',
]
| 2,892 | 34.716049 | 79 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/multihead_attention_ori.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size)
self.v_proj = quant_noise(nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size)
self.q_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size)
self.out_proj = quant_noise(nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
self.tpu = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def prepare_for_tpu_(self, **kwargs):
self.tpu = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if (
not self.onnx_trace
and not self.tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
):
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not self.tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf")
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf'))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
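# Hedged usage sketch (dimensions are illustrative assumptions): self-attention
# over a (time, batch, channel) tensor; the module returns the attended output
# and, by default, attention weights averaged over heads.
if __name__ == "__main__":
    attn = MultiheadAttention(embed_dim=16, num_heads=4, self_attention=True)
    x = torch.randn(10, 2, 16)          # T x B x C
    out, weights = attn(query=x, key=x, value=x)
    assert out.shape == x.shape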
| 19,130 | 39.023013 | 103 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/layer_norm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
has_fused_layernorm = True
class FusedLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if torch.jit.is_scripting():
export = True
if not export and torch.cuda.is_available() and has_fused_layernorm:
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
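# Hedged usage sketch: the LayerNorm factory transparently returns apex's
# FusedLayerNorm when it is importable and CUDA is available, and falls back to
# torch.nn.LayerNorm otherwise; either way it is used like a regular module.
if __name__ == "__main__":
    ln = LayerNorm(16)
    x = torch.randn(10, 2, 16)
    assert ln(x).shape == x.shape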
| 1,499 | 29 | 81 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/kmeans_vector_quantizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from fairseq.modules import Fp32GroupNorm
class KmeansVectorQuantizer(nn.Module):
def __init__(
self, dim, num_vars, groups, combine_groups, vq_dim, time_first, gamma=0.25
):
'''Vector quantization using straight pass-through estimator (i.e. kmeans)
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
gamma: commitment loss coefficient
'''
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.vq_dim = vq_dim
self.time_first = time_first
assert (
vq_dim % groups == 0
), f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
self.var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.embedding = nn.Parameter(
0.01 * torch.randn(num_vars, num_groups, self.var_dim)
)
self.projection = nn.Sequential(
nn.Conv1d(dim, dim, kernel_size=1, groups=groups, bias=False),
Fp32GroupNorm(groups, dim),
)
self.gamma = gamma
self.mse_mean = nn.MSELoss(reduction="mean")
def _pass_grad(self, x, y):
""" Manually set gradient for backward pass.
for y = f(x), ensure that during the backward pass,
dL/dy = dL/dx regardless of f(x).
Returns:
y, with the gradient forced to be dL/dy = dL/dx.
"""
return y.detach() + (x - x.detach())
@property
def expand_embedding(self):
if self.combine_groups:
return self.embedding.expand(self.num_vars, self.groups, self.var_dim)
return self.embedding
def forward_idx(self, x):
res = self.forward(x, produce_targets=True)
return res["x"], res["targets"]
def forward(self, x, produce_targets=False):
result = {"num_vars": self.num_vars}
if self.time_first:
x = x.transpose(1, 2)
bsz, fsz, tsz = x.shape
ze = self.projection(x)
ze_ = ze.view(bsz, self.groups, self.var_dim, tsz).permute(0, 3, 1, 2)
d = (
(ze_.unsqueeze(0) - self.expand_embedding.unsqueeze(1).unsqueeze(1))
.view(self.num_vars, bsz, tsz, self.groups, -1)
.norm(dim=-1, p=2)
)
idx = d.argmin(dim=0)
zq = (
torch.stack(
[
self.expand_embedding[idx[..., group], group]
for group in range(self.groups)
],
dim=-2,
)
.view(bsz, tsz, self.groups * self.var_dim)
.permute(0, 2, 1)
)
assert ze.shape == zq.shape, (ze.shape, zq.shape)
x = self._pass_grad(ze, zq)
hard_x = (
idx.new_zeros(bsz*tsz*self.groups, self.num_vars)
.scatter_(-1, idx.view(-1, 1), 1.0)
.view(bsz * tsz, self.groups, -1)
)
hard_probs = torch.mean(hard_x.float(), dim=0)
result["code_perplexity"] = torch.exp(
-torch.sum(hard_probs * torch.log(hard_probs + 1e-7), dim=-1)
).sum()
if produce_targets:
result["targets"] = idx
if self.time_first:
x = x.transpose(1, 2) # BCT -> BTC
result["x"] = x
ze = ze.float()
zq = zq.float()
latent_loss = self.mse_mean(zq, ze.detach())
commitment_loss = self.mse_mean(ze, zq.detach())
result["kmeans_loss"] = latent_loss + self.gamma * commitment_loss
return result
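# Minimal usage sketch (not part of the original file); sizes are illustrative.
# Note that `dim` equals `vq_dim` here so the grouped reshape inside forward() lines up.
if __name__ == "__main__":
    quantizer = KmeansVectorQuantizer(
        dim=32, num_vars=64, groups=2, combine_groups=False,
        vq_dim=32, time_first=True,
    )
    x = torch.randn(4, 10, 32)                     # B x T x C
    out = quantizer(x, produce_targets=True)
    # quantized output, per-position codebook indices, and the kmeans (latent+commitment) loss
    print(out["x"].shape, out["targets"].shape, out["kmeans_loss"].item())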
| 4,248 | 31.937984 | 89 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/layer_drop.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
LayerDrop as described in https://arxiv.org/abs/1909.11556.
"""
import torch
import torch.nn as nn
class LayerDropModuleList(nn.ModuleList):
"""
A LayerDrop implementation based on :class:`torch.nn.ModuleList`.
We refresh the choice of which layers to drop every time we iterate
over the LayerDropModuleList instance. During evaluation we always
iterate over all layers.
Usage::
layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3])
for layer in layers: # this might iterate over layers 1 and 3
x = layer(x)
for layer in layers: # this might iterate over all layers
x = layer(x)
for layer in layers: # this might not iterate over any layers
x = layer(x)
Args:
p (float): probability of dropping out each layer
modules (iterable, optional): an iterable of modules to add
"""
def __init__(self, p, modules=None):
super().__init__(modules)
self.p = p
def __iter__(self):
dropout_probs = torch.empty(len(self)).uniform_()
for i, m in enumerate(super().__iter__()):
if not self.training or (dropout_probs[i] > self.p):
yield m
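# Minimal demo (not part of the original file): layers are skipped stochastically only in
# training mode; evaluation always visits every layer.
if __name__ == "__main__":
    layers = LayerDropModuleList(p=0.5, modules=[nn.Linear(4, 4) for _ in range(6)])
    layers.eval()
    print(len(list(layers)))     # 6: all layers are visited at evaluation time
    layers.train()
    print(len(list(layers)))     # roughly half on average; varies per iteration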
| 1,409 | 30.333333 | 71 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/dynamic_crf_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file re-implements the low-rank and beam approximation of the CRF layer
Proposed by:
Sun, Zhiqing, et al.
Fast Structured Decoding for Sequence Models
https://arxiv.org/abs/1910.11555
The CRF implementation is mainly borrowed from
https://github.com/kmkurn/pytorch-crf/blob/master/torchcrf/__init__.py
"""
import numpy as np
import torch
import torch.nn as nn
def logsumexp(x, dim=1):
return torch.logsumexp(x.float(), dim=dim).type_as(x)
class DynamicCRF(nn.Module):
"""Dynamic CRF layer is used to approximate the traditional
Conditional Random Fields (CRF)
$P(y | x) = 1/Z(x) exp(sum_i s(y_i, x) + sum_i t(y_{i-1}, y_i, x))$
    where in this function, we assume the emission scores (s) are given,
and the transition score is a |V| x |V| matrix $M$
in the following two aspects:
(1) it used a low-rank approximation for the transition matrix:
$M = E_1 E_2^T$
(2) it used a beam to estimate the normalizing factor Z(x)
"""
def __init__(self, num_embedding, low_rank=32, beam_size=64):
super().__init__()
self.E1 = nn.Embedding(num_embedding, low_rank)
self.E2 = nn.Embedding(num_embedding, low_rank)
self.vocb = num_embedding
self.rank = low_rank
self.beam = beam_size
def extra_repr(self):
return "vocab_size={}, low_rank={}, beam_size={}".format(
self.vocb, self.rank, self.beam)
def forward(self, emissions, targets, masks, beam=None):
"""
Compute the conditional log-likelihood of a sequence of target tokens given emission scores
Args:
            emissions (`~torch.Tensor`): Emission scores are usually the unnormalized decoder output
``(batch_size, seq_len, vocab_size)``. We assume batch-first
targets (`~torch.LongTensor`): Sequence of target token indices
                ``(batch_size, seq_len)``
masks (`~torch.ByteTensor`): Mask tensor with the same size as targets
Returns:
`~torch.Tensor`: approximated log-likelihood
"""
numerator = self._compute_score(emissions, targets, masks)
denominator = self._compute_normalizer(emissions, targets, masks, beam)
return numerator - denominator
def forward_decoder(self, emissions, masks=None, beam=None):
"""
Find the most likely output sequence using Viterbi algorithm.
Args:
            emissions (`~torch.Tensor`): Emission scores are usually the unnormalized decoder output
``(batch_size, seq_len, vocab_size)``. We assume batch-first
masks (`~torch.ByteTensor`): Mask tensor with the same size as targets
Returns:
`~torch.LongTensor`: decoded sequence from the CRF model
"""
return self._viterbi_decode(emissions, masks, beam)
def _compute_score(self, emissions, targets, masks=None):
batch_size, seq_len = targets.size()
emission_scores = emissions.gather(2, targets[:, :, None])[:, :, 0] # B x T
transition_scores = (self.E1(targets[:, :-1]) * self.E2(targets[:, 1:])).sum(2)
scores = emission_scores
scores[:, 1:] += transition_scores
if masks is not None:
scores = scores * masks.type_as(scores)
return scores.sum(-1)
def _compute_normalizer(self, emissions, targets=None, masks=None, beam=None):
        # HACK: we include "target" which is a heuristic for training
# HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?)
beam = beam if beam is not None else self.beam
batch_size, seq_len = emissions.size()[:2]
if targets is not None:
            _emissions = emissions.scatter(2, targets[:, :, None], float('inf'))
beam_targets = _emissions.topk(beam, 2)[1]
beam_emission_scores = emissions.gather(2, beam_targets)
else:
beam_emission_scores, beam_targets = emissions.topk(beam, 2)
beam_transition_score1 = self.E1(beam_targets[:, :-1]) # B x (T-1) x K x D
beam_transition_score2 = self.E2(beam_targets[:, 1:]) # B x (T-1) x K x D
beam_transition_matrix = torch.bmm(
beam_transition_score1.view(-1, beam, self.rank),
beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2))
beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam)
# compute the normalizer in the log-space
score = beam_emission_scores[:, 0] # B x K
for i in range(1, seq_len):
next_score = score[:, :, None] + beam_transition_matrix[:, i-1]
next_score = logsumexp(next_score, dim=1) + beam_emission_scores[:, i]
if masks is not None:
score = torch.where(masks[:, i:i+1], next_score, score)
else:
score = next_score
# Sum (log-sum-exp) over all possible tags
return logsumexp(score, dim=1)
def _viterbi_decode(self, emissions, masks=None, beam=None):
# HACK: we use a beam of tokens to approximate the normalizing factor (which is bad?)
beam = beam if beam is not None else self.beam
batch_size, seq_len = emissions.size()[:2]
beam_emission_scores, beam_targets = emissions.topk(beam, 2)
beam_transition_score1 = self.E1(beam_targets[:, :-1]) # B x (T-1) x K x D
beam_transition_score2 = self.E2(beam_targets[:, 1:]) # B x (T-1) x K x D
beam_transition_matrix = torch.bmm(
beam_transition_score1.view(-1, beam, self.rank),
beam_transition_score2.view(-1, beam, self.rank).transpose(1, 2))
beam_transition_matrix = beam_transition_matrix.view(batch_size, -1, beam, beam)
traj_tokens, traj_scores = [], []
finalized_tokens, finalized_scores = [], []
# compute the normalizer in the log-space
score = beam_emission_scores[:, 0] # B x K
dummy = torch.arange(beam, device=score.device).expand(*score.size()).contiguous()
for i in range(1, seq_len):
traj_scores.append(score)
_score = score[:, :, None] + beam_transition_matrix[:, i-1]
_score, _index = _score.max(dim=1)
_score = _score + beam_emission_scores[:, i]
if masks is not None:
score = torch.where(masks[:, i: i+1], _score, score)
index = torch.where(masks[:, i: i+1], _index, dummy)
else:
score, index = _score, _index
traj_tokens.append(index)
# now running the back-tracing and find the best
best_score, best_index = score.max(dim=1)
finalized_tokens.append(best_index[:, None])
finalized_scores.append(best_score[:, None])
for idx, scs in zip(reversed(traj_tokens), reversed(traj_scores)):
previous_index = finalized_tokens[-1]
finalized_tokens.append(idx.gather(1, previous_index))
finalized_scores.append(scs.gather(1, previous_index))
finalized_tokens.reverse()
finalized_tokens = torch.cat(finalized_tokens, 1)
finalized_tokens = beam_targets.gather(2, finalized_tokens[:, :, None])[:, :, 0]
finalized_scores.reverse()
finalized_scores = torch.cat(finalized_scores, 1)
finalized_scores[:, 1:] = finalized_scores[:, 1:] - finalized_scores[:, :-1]
return finalized_scores, finalized_tokens
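# Minimal usage sketch (not part of the original file); batch/vocab sizes are illustrative.
# forward() returns the (approximate) log-likelihood per sentence, forward_decoder() the
# beam-approximated Viterbi decode.
if __name__ == "__main__":
    crf = DynamicCRF(num_embedding=100, low_rank=8, beam_size=16)
    emissions = torch.randn(2, 5, 100)                 # B x T x V unnormalized scores
    targets = torch.randint(0, 100, (2, 5))            # B x T reference tokens
    masks = torch.ones(2, 5, dtype=torch.bool)         # B x T, no padding here
    log_likelihood = crf(emissions, targets, masks)
    scores, tokens = crf.forward_decoder(emissions, masks)
    print(log_likelihood.shape, tokens.shape)          # torch.Size([2]) torch.Size([2, 5])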
| 7,676 | 40.497297 | 99 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/scalar_bias.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class ScalarBias(torch.autograd.Function):
"""
Adds a vector of scalars, used in self-attention mechanism to allow
the model to optionally attend to this vector instead of the past
"""
@staticmethod
def forward(ctx, input, dim, bias_init):
size = list(input.size())
size[dim] += 1
output = input.new(*size).fill_(bias_init)
output.narrow(dim, 1, size[dim] - 1).copy_(input)
ctx.dim = dim
return output
@staticmethod
def backward(ctx, grad):
return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None
def scalar_bias(input, dim, bias_init=0):
return ScalarBias.apply(input, dim, bias_init)
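# Minimal usage sketch (not part of the original file): a constant slot is prepended along
# the chosen dimension, and the gradient w.r.t. that slot is dropped in backward().
if __name__ == "__main__":
    x = torch.randn(2, 3, 4)
    y = scalar_bias(x, dim=1)        # prepend one bias position along dim 1
    print(y.shape)                   # torch.Size([2, 4, 4]); y[:, 0, :] holds the bias value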
| 888 | 26.78125 | 74 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/transformer_sentence_encoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import torch
import torch.nn as nn
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
TransformerSentenceEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
1. If normal_init_linear_weights is set then weights of linear
layer will be initialized using the normal distribution and
        bias will be set to the specified value.
2. If normal_init_embed_weights is set then weights of embedding
layer will be initialized using the normal distribution.
3. If normal_init_proj_weights is set then weights of
in_project_weight for MultiHeadAttention initialized using
the normal distribution (to be validated).
"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
module.q_proj.weight.data.normal_(mean=0.0, std=0.02)
module.k_proj.weight.data.normal_(mean=0.0, std=0.02)
module.v_proj.weight.data.normal_(mean=0.0, std=0.02)
class TransformerSentenceEncoder(nn.Module):
"""
Implementation for a Bi-directional Transformer based Sentence Encoder used
in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
TransformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(
self,
padding_idx: int,
vocab_size: int,
num_encoder_layers: int = 6,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
layerdrop: float = 0.0,
max_seq_len: int = 256,
num_segments: int = 2,
use_position_embeddings: bool = True,
offset_positions_by_padding: bool = True,
encoder_normalize_before: bool = False,
apply_bert_init: bool = False,
activation_fn: str = "relu",
learned_pos_embedding: bool = True,
embed_scale: float = None,
freeze_embeddings: bool = False,
n_trans_layers_to_freeze: int = 0,
export: bool = False,
traceable: bool = False,
q_noise: float = 0.0,
qn_block_size: int = 8,
) -> None:
super().__init__()
self.padding_idx = padding_idx
self.vocab_size = vocab_size
self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
self.layerdrop = layerdrop
self.max_seq_len = max_seq_len
self.embedding_dim = embedding_dim
self.num_segments = num_segments
self.use_position_embeddings = use_position_embeddings
self.apply_bert_init = apply_bert_init
self.learned_pos_embedding = learned_pos_embedding
self.traceable = traceable
self.tpu = False # whether we're on TPU
self.embed_tokens = self.build_embedding(
self.vocab_size, self.embedding_dim, self.padding_idx
)
self.embed_scale = embed_scale
if q_noise > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
q_noise,
qn_block_size,
)
else:
self.quant_noise = None
self.segment_embeddings = (
nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None)
if self.num_segments > 0
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_seq_len,
self.embedding_dim,
padding_idx=(self.padding_idx if offset_positions_by_padding else None),
learned=self.learned_pos_embedding,
)
if self.use_position_embeddings
else None
)
if self.layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend([
self.build_transformer_sentence_encoder_layer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=self.dropout_module.p,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
for _ in range(num_encoder_layers)
])
if encoder_normalize_before:
self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export)
else:
self.emb_layer_norm = None
# Apply initialization of model params after building the model
if self.apply_bert_init:
self.apply(init_bert_params)
def freeze_module_params(m):
if m is not None:
for p in m.parameters():
p.requires_grad = False
if freeze_embeddings:
freeze_module_params(self.embed_tokens)
freeze_module_params(self.segment_embeddings)
freeze_module_params(self.embed_positions)
freeze_module_params(self.emb_layer_norm)
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return nn.Embedding(vocab_size, embedding_dim, padding_idx)
def build_transformer_sentence_encoder_layer(
self,
embedding_dim,
ffn_embedding_dim,
num_attention_heads,
dropout,
attention_dropout,
activation_dropout,
activation_fn,
export,
q_noise,
qn_block_size,
):
return TransformerSentenceEncoderLayer(
embedding_dim=embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
def prepare_for_tpu_(self, **kwargs):
self.tpu = True
def forward(
self,
tokens: torch.Tensor,
segment_labels: torch.Tensor = None,
last_state_only: bool = False,
positions: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
# compute padding mask. This is needed for multi-head attention
padding_mask = tokens.eq(self.padding_idx)
if not self.traceable and not self.tpu and not padding_mask.any():
padding_mask = None
x = self.embed_tokens(tokens)
if self.embed_scale is not None:
x *= self.embed_scale
if self.embed_positions is not None:
x += self.embed_positions(tokens, positions=positions)
if self.segment_embeddings is not None and segment_labels is not None:
x += self.segment_embeddings(segment_labels)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.emb_layer_norm is not None:
x = self.emb_layer_norm(x)
x = self.dropout_module(x)
# account for padding while computing the representation
if padding_mask is not None:
x *= 1 - padding_mask.unsqueeze(-1).type_as(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
inner_states = []
if not last_state_only:
inner_states.append(x)
for layer in self.layers:
x, _ = layer(x, self_attn_padding_mask=padding_mask)
if not last_state_only:
inner_states.append(x)
sentence_rep = x[0, :, :]
if last_state_only:
inner_states = [x]
if self.traceable:
return torch.stack(inner_states), sentence_rep
else:
return inner_states, sentence_rep
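# Minimal usage sketch (not part of the original file); the hyper-parameters below are
# deliberately tiny and purely illustrative.
if __name__ == "__main__":
    encoder = TransformerSentenceEncoder(
        padding_idx=1, vocab_size=1000, num_encoder_layers=2,
        embedding_dim=64, ffn_embedding_dim=128, num_attention_heads=4,
    )
    tokens = torch.randint(2, 1000, (2, 16))            # B x T, no padding tokens here
    inner_states, sentence_rep = encoder(tokens)
    # one T x B x C state per layer (plus the embedding) and a B x C sentence vector
    print(len(inner_states), inner_states[-1].shape, sentence_rep.shape)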
| 9,720 | 33.842294 | 90 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/grad_multiply.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
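# Minimal usage sketch (not part of the original file): the forward pass is the identity,
# only the gradient flowing back into `x` is scaled.
if __name__ == "__main__":
    x = torch.randn(3, requires_grad=True)
    y = GradMultiply.apply(x, 0.5)
    y.sum().backward()
    print(x.grad)                    # every entry equals 0.5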
| 442 | 22.315789 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/sparse_transformer_sentence_encoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq.modules import TransformerSentenceEncoder
from fairseq.modules.sparse_transformer_sentence_encoder_layer import SparseTransformerSentenceEncoderLayer
class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
"""
Sparse implementation of the TransformerSentenceEncoder
- see SparseMultiheadAttention
"""
def __init__(
self,
padding_idx: int,
vocab_size: int,
num_encoder_layers: int = 6,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
max_seq_len: int = 256,
num_segments: int = 2,
use_position_embeddings: bool = True,
offset_positions_by_padding: bool = True,
encoder_normalize_before: bool = False,
apply_bert_init: bool = False,
activation_fn: str = "relu",
learned_pos_embedding: bool = True,
embed_scale: float = None,
freeze_embeddings: bool = False,
n_trans_layers_to_freeze: int = 0,
export: bool = False,
is_bidirectional: bool = True,
stride: int = 32,
expressivity: int = 8,
) -> None:
super().__init__(
padding_idx, vocab_size, num_encoder_layers, embedding_dim,
ffn_embedding_dim, num_attention_heads, dropout, attention_dropout,
activation_dropout, max_seq_len, num_segments, use_position_embeddings,
offset_positions_by_padding, encoder_normalize_before, apply_bert_init,
activation_fn, learned_pos_embedding, embed_scale, freeze_embeddings,
n_trans_layers_to_freeze, export
)
self.layers = nn.ModuleList(
[
SparseTransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
is_bidirectional=is_bidirectional,
stride=stride,
expressivity=expressivity,
)
for _ in range(num_encoder_layers)
]
)
def freeze_module_params(m):
if m is not None:
for p in m.parameters():
p.requires_grad = False
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
| 2,965 | 36.075 | 107 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/activation_select.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
def parse_activation(activation_type):
args = activation_type.split("_")
return args
def ActivationSelect(activation_type):
args = parse_activation(activation_type)
activation_type = args[0]
if activation_type == "relu":
return nn.ReLU()
elif activation_type == "leakyrelu":
return nn.LeakyReLU(negative_slope=float(args[1]))
#default 0.01
elif activation_type == "prelu":
return nn.PReLU(init=float(args[1]))
#default 0.25
elif activation_type =='celu':
return nn.CELU(alpha=float(args[1]))
#default 1
elif activation_type =='gelu':
return nn.GELU()
#no parameter
elif activation_type =='elu':
return nn.ELU(alpha=float(args[1]))
#default 1
elif activation_type =='identity':
return nn.Identity()
elif activation_type =='sigmoid':
return nn.Sigmoid()
elif activation_type =='silu':
        # only available since torch 1.10
return nn.SiLU()
elif activation_type =='tanh':
return nn.Tanh()
    else:
        raise ValueError("unknown activation type: {}".format(activation_type))
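# Minimal usage sketch (not part of the original file): the string encodes
# "<name>_<parameter>", e.g. a leaky ReLU with slope 0.1.
if __name__ == "__main__":
    import torch
    act = ActivationSelect("leakyrelu_0.1")
    print(act(torch.tensor([-1.0, 2.0])))    # tensor([-0.1000,  2.0000])
    print(ActivationSelect("gelu"))          # parameter-free activations take no suffix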
| 1,317 | 28.288889 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/sinusoidal_positional_embedding.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Optional
import torch
import torch.onnx.operators
from fairseq import utils
from torch import Tensor, nn
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size, embedding_dim, padding_idx
)
self.onnx_trace = False
self.register_buffer("_float_tensor", torch.FloatTensor(1))
self.max_positions = int(1e5)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
@staticmethod
def get_embedding(
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
1
) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
num_embeddings, -1
)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(
self,
input,
incremental_state: Optional[Any] = None,
timestep: Optional[Tensor] = None,
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bspair = torch.onnx.operators.shape_as_tensor(input)
bsz, seq_len = bspair[0], bspair[1]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos, self.embedding_dim, self.padding_idx
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
if self.onnx_trace:
return (
self.weights.index_select(index=self.padding_idx + pos, dim=0)
.unsqueeze(1)
.repeat(bsz, 1, 1)
)
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
if self.onnx_trace:
flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
embedding_shape = torch.cat(
(bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
)
embeddings = torch.onnx.operators.reshape_from_tensor_shape(
flat_embeddings, embedding_shape
)
return embeddings
return (
self.weights.index_select(0, positions.view(-1))
.view(bsz, seq_len, -1)
.detach()
)
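# Minimal usage sketch (not part of the original file); token ids are illustrative and
# id 1 plays the role of the padding symbol.
if __name__ == "__main__":
    pos_emb = SinusoidalPositionalEmbedding(embedding_dim=16, padding_idx=1, init_size=32)
    tokens = torch.tensor([[5, 6, 7, 1]])    # B x T, last position is padding
    out = pos_emb(tokens)
    print(out.shape)                         # torch.Size([1, 4, 16])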
| 3,880 | 35.613208 | 87 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/lightweight_convolution.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules.unfold import unfold1d
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
def LightweightConv(input_size, kernel_size=1, padding_l=None, num_heads=1,
weight_dropout=0., weight_softmax=False, bias=False):
if torch.cuda.is_available():
try:
from fairseq.modules.lightconv_layer import LightconvLayer
return LightconvLayer(input_size, kernel_size=kernel_size,
padding_l=padding_l, num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax, bias=bias)
except ImportError as e:
print(e)
return LightweightConv1dTBC(input_size, kernel_size=kernel_size,
padding_l=padding_l, num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax, bias=bias)
class LightweightConv1d(nn.Module):
'''Lightweight Convolution assuming the input is BxCxT
This is just an example that explains LightConv clearer than the TBC version.
We don't use this module in the model.
Args:
input_size: # of channels of the input and output
        kernel_size: convolution kernel size
padding: padding
num_heads: number of heads used. The weight is of shape
`(num_heads, 1, kernel_size)`
weight_softmax: normalize the weight with softmax before the convolution
Shape:
Input: BxCxT, i.e. (batch_size, input_size, timesteps)
Output: BxCxT, i.e. (batch_size, input_size, timesteps)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
'''
def __init__(self, input_size, kernel_size=1, padding=0, num_heads=1,
weight_softmax=False, bias=False, weight_dropout=0.):
super().__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.num_heads = num_heads
self.padding = padding
self.weight_softmax = weight_softmax
self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.)
def forward(self, input):
'''
input size: B x C x T
output size: B x C x T
'''
B, C, T = input.size()
H = self.num_heads
weight = self.weight
if self.weight_softmax:
weight = F.softmax(weight, dim=-1)
weight = self.weight_dropout_module(weight)
# Merge every C/H entries into the batch dimension (C = self.input_size)
# B x C x T -> (B * C/H) x H x T
# One can also expand the weight to C x 1 x K by a factor of C/H
# and do not reshape the input instead, which is slow though
input = input.view(-1, H, T)
output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
output = output.view(B, C, T)
if self.bias is not None:
output = output + self.bias.view(1, -1, 1)
return output
@with_incremental_state
class LightweightConv1dTBC(nn.Module):
'''Lightweight Convolution assuming the input is TxBxC
Args:
input_size: # of channels of the input
        kernel_size: convolution kernel size
padding_l: padding to the left when using "same" padding
num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
weight_dropout: the drop rate of the DropConnect to drop the weight
weight_softmax: normalize the weight with softmax before the convolution
bias: use bias
Shape:
Input: TxBxC, i.e. (timesteps, batch_size, input_size)
Output: TxBxC, i.e. (timesteps, batch_size, input_size)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
'''
def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1,
weight_dropout=0., weight_softmax=False, bias=False):
super().__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__)
self.weight_softmax = weight_softmax
self.weight = nn.Parameter(torch.Tensor(num_heads, 1, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.reset_parameters()
self.onnx_trace = False
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.)
def forward(self, x, incremental_state=None, unfold=False):
'''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
args:
x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
incremental_state: A dict to keep the state
unfold: unfold the input or not. If not, we use the matrix trick instead
'''
unfold = unfold or (incremental_state is not None)
if unfold:
output = self._forward_unfolded(x, incremental_state)
else:
output = self._forward_expanded(x, incremental_state)
if self.bias is not None:
output = output + self.bias.view(1, 1, -1)
return output
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def _forward_unfolded(self, x, incremental_state):
'''The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right.'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight.view(H, K)
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
x_unfold = x_unfold.view(T*B*H, R, -1)
else:
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, self.kernel_size, self.padding_l, 0)
x_unfold = x_unfold.view(T*B*H, R, K)
if self.weight_softmax:
weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2):]
K = weight.size(1)
weight = weight.view(1, H, K).expand(T*B, H, K).contiguous().view(T*B*H, K, 1)
weight = self.weight_dropout_module(weight)
output = torch.bmm(x_unfold, weight) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_state):
'''Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight.view(H, K)
if self.weight_softmax:
weight = utils.softmax(weight, dim=1, onnx_trace=self.onnx_trace).type_as(weight)
weight = weight.view(1, H, K).expand(T*B, H, K).contiguous()
weight = weight.view(T, B*H, K).transpose(0, 1)
x = x.view(T, B*H, R).transpose(0, 1)
P = self.padding_l
if K > T and P == K-1:
weight = weight.narrow(2, K-T, T)
K, P = T, T-1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T)
weight_expanded = self.weight_dropout_module(weight_expanded)
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, 'input_buffer')
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
def extra_repr(self):
s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, bias={}'.format(
self.input_size, self.kernel_size, self.padding_l,
self.num_heads, self.weight_softmax, self.bias is not None
)
if self.weight_dropout_module.p > 0.:
s += ', weight_dropout={}'.format(self.weight_dropout_module.p)
return s
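# Minimal usage sketch (not part of the original file); sizes are illustrative.
# padding_l = kernel_size // 2 gives a "same"-length output for odd kernels.
if __name__ == "__main__":
    conv = LightweightConv1dTBC(
        input_size=8, kernel_size=3, padding_l=1, num_heads=2, weight_softmax=True,
    )
    x = torch.randn(10, 4, 8)        # T x B x C
    print(conv(x).shape)             # torch.Size([10, 4, 8])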
| 10,496 | 39.844358 | 104 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/dynamic_convolution.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from .unfold import unfold1d
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
def DynamicConv(input_size, kernel_size=1, padding_l=None, num_heads=1,
weight_dropout=0., weight_softmax=False,
renorm_padding=False, bias=False, conv_bias=False,
query_size=None, in_proj=False):
if torch.cuda.is_available():
try:
from fairseq.modules.dynamicconv_layer import DynamicconvLayer
return DynamicconvLayer(input_size, kernel_size=kernel_size,
padding_l=padding_l, num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax, bias=bias)
except ImportError as e:
print(e)
return DynamicConv1dTBC(input_size, kernel_size=kernel_size,
padding_l=padding_l, num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax, bias=bias)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
@with_incremental_state
class DynamicConv1dTBC(nn.Module):
'''Dynamic lightweight convolution taking T x B x C inputs
Args:
input_size: # of channels of the input
        kernel_size: convolution kernel size
padding_l: padding to the left when using "same" padding
num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
weight_dropout: the drop rate of the DropConnect to drop the weight
weight_softmax: normalize the weight with softmax before the convolution
renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1)
bias: use bias
conv_bias: bias of the convolution
query_size: specified when feeding a different input as the query
in_proj: project the input and generate the filter together
Shape:
Input: TxBxC, i.e. (timesteps, batch_size, input_size)
Output: TxBxC, i.e. (timesteps, batch_size, input_size)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
'''
def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1,
weight_dropout=0., weight_softmax=False,
renorm_padding=False, bias=False, conv_bias=False,
query_size=None, in_proj=False):
super().__init__()
self.input_size = input_size
self.query_size = input_size if query_size is None else query_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__)
self.weight_softmax = weight_softmax
self.renorm_padding = renorm_padding
if in_proj:
self.weight_linear = Linear(self.input_size, self.input_size + num_heads * kernel_size * 1)
else:
self.weight_linear = Linear(self.query_size, num_heads * kernel_size * 1, bias=bias)
if conv_bias:
self.conv_bias = nn.Parameter(torch.Tensor(input_size))
else:
self.conv_bias = None
self.reset_parameters()
@property
def in_proj(self):
return self.weight_linear.out_features == self.input_size + self.num_heads * self.kernel_size
def reset_parameters(self):
self.weight_linear.reset_parameters()
if self.conv_bias is not None:
nn.init.constant_(self.conv_bias, 0.)
def forward(self, x, incremental_state=None, query=None, unfold=None):
'''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
args:
x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
incremental_state: A dict to keep the state
unfold: unfold the input or not. If not, we use the matrix trick instead
query: use the specified query to predict the conv filters
'''
unfold = x.size(0) > 512 if unfold is None else unfold # use unfold mode as default for long sequence to save memory
unfold = unfold or (incremental_state is not None)
assert query is None or not self.in_proj
if query is None:
query = x
if unfold:
output = self._forward_unfolded(x, incremental_state, query)
else:
output = self._forward_expanded(x, incremental_state, query)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
def _forward_unfolded(self, x, incremental_state, query):
'''The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right.'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
if self.in_proj:
proj = self.weight_linear(x)
x = proj.narrow(2, 0, self.input_size).contiguous()
weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1)
else:
weight = self.weight_linear(query).view(T*B*H, -1)
# renorm_padding is only implemented in _forward_expanded
assert not self.renorm_padding or incremental_state is not None
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
x_unfold = x_unfold.view(T*B*H, R, -1)
else:
padding_l = self.padding_l
if K > T and padding_l == K-1:
weight = weight.narrow(1, K-T, T)
K, padding_l = T, T-1
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, K, padding_l, 0)
x_unfold = x_unfold.view(T*B*H, R, K)
if self.weight_softmax and not self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = weight.narrow(1, 0, K)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2):]
K = weight.size(1)
if self.weight_softmax and self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_stat, query):
'''Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
if self.in_proj:
proj = self.weight_linear(x)
x = proj.narrow(2, 0, self.input_size).contiguous()
weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1)
else:
weight = self.weight_linear(query).view(T*B*H, -1)
if not self.renorm_padding:
if self.weight_softmax:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
weight = weight.narrow(1, 0, K).contiguous()
weight = weight.view(T, B*H, K).transpose(0, 1)
x = x.view(T, B*H, R).transpose(0, 1)
if self.weight_softmax and self.renorm_padding:
# turn the convolution filters into band matrices
weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf'))
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
# normalize the weight over valid positions like self-attention
weight_expanded = F.softmax(weight_expanded, dim=2)
weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
else:
P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
if K > T and P == K-1:
weight = weight.narrow(2, K-T, T)
K, P = T, T-1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, 'input_buffer')
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
def extra_repr(self):
s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}'.format(
self.input_size, self.kernel_size, self.padding_l,
self.num_heads, self.weight_softmax, self.conv_bias is not None, self.renorm_padding,
self.in_proj,
)
if self.query_size != self.input_size:
s += ', query_size={}'.format(self.query_size)
if self.weight_dropout_module.p > 0.:
s += ', weight_dropout={}'.format(self.weight_dropout_module.p)
return s
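# Minimal usage sketch (not part of the original file); sizes are illustrative. Unlike the
# lightweight variant, the convolution weights here are predicted from the input itself.
if __name__ == "__main__":
    conv = DynamicConv1dTBC(
        input_size=8, kernel_size=3, padding_l=1, num_heads=2, weight_softmax=True,
    )
    x = torch.randn(10, 4, 8)        # T x B x C
    print(conv(x).shape)             # torch.Size([10, 4, 8])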
| 11,057 | 43.95122 | 132 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/noise_dropout.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
import torch
logger = logging.getLogger(__name__)
class NoiseDropout(nn.Module):
def __init__(self, alpha):
super().__init__()
self.alpha = alpha
def forward(self, x, inplace: bool = False):
if self.training:
coef = (2*torch.rand_like(x)-1).to(x)
coef *= self.alpha
x = x*(1+coef)
return x
else:
return x
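# Minimal usage sketch (not part of the original file): multiplicative uniform noise of
# relative magnitude `alpha` is applied in training mode only.
if __name__ == "__main__":
    noise = NoiseDropout(alpha=0.1)
    x = torch.ones(2, 3)
    noise.train()
    print(noise(x))                  # entries lie in [0.9, 1.1]
    noise.eval()
    print(noise(x))                  # identity at evaluation time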
| 700 | 22.366667 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/my_attention.py
| 0 | 0 | 0 |
py
|
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/norm/mask_powernorm3d.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import numpy as np
from scipy import io
__all__ = ['MaskPowerNorm3d']
class MaskPowerNorm3d(nn.Module):
"""
    An implementation of masked power normalization: activations are rescaled by the
    root mean square computed over non-padded positions, with optional penalty terms
    on the batch/running second-moment statistics.
"""
__count = 0
def update_cnt(self):
MaskPowerNorm3d.__count += 1
def __init__(self, num_features, eps=1e-5, momentum=0.05, \
affine=True, track_running_stats=True, sync_bn=True, process_group=None, \
with_seq = True, prefix = 'None', weight_affine=0, penalty_var=0, \
penalty_type='diff', learn_alpha=False, alpha=1):
super().__init__()
self.id = self.__count
self.prefix = prefix
#print(self.id)
self.update_cnt()
self.interval = 1100 #standard: 1100
#self.save_interval = 20
self.batchnum = 0
self.num_features = num_features
self.eps = eps
self.momentum = momentum #default0.05
self.affine = affine
self.track_running_stats = track_running_stats
self.maxlen = 600
self.with_seq = with_seq
self.learn_alpha = learn_alpha
if self.learn_alpha:
self.alpha = Parameter(torch.Tensor(1))
nn.init.constant_(self.alpha, alpha)
self.weight_affine = weight_affine
self.penalty_var = penalty_var
self.penalty_type = penalty_type
assert self.penalty_type in ['diff','mag','reldiffnorm','reldiffcosine','symkl'], "wrong penalty type for BN!"
#self.running_mean_list = []
#self.running_var_list = []
#self.batch_mean_list = []
#self.batch_var_list = []
self.batch_var_norm = []
self.running_var_norm = []
self.diff_var_norm = []
self.grad_proj_list = []
self.file = 'statistics/{}/bn_{}'.format(self.prefix,self.id)
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
if self.track_running_stats:
self.register_buffer('running_var', torch.ones(num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
else:
self.register_parameter('running_var', None)
self.register_parameter('num_batches_tracked', None)
self.sync_bn = sync_bn
# gpu_size is set through DistributedDataParallel initialization. This is to ensure that SyncBatchNorm is used
# under supported condition (single GPU per process)
self.process_group = process_group
self.ddp_gpu_size = 4
self.reset_parameters()
def reset_running_stats(self):
if self.track_running_stats:
self.running_var.fill_(1)
self.num_batches_tracked.zero_()
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
def record_forward(self):
diff_var_data = (self.running_var-self.batch_var).data
self.diff_var_norm.append(diff_var_data.norm().cpu().numpy().item())
self.batch_var_norm.append(self.batch_var.norm().cpu().numpy().item())
self.running_var_norm.append(self.running_var.norm().cpu().numpy().item())
if self.batchnum%self.interval==0:
savefile = "{}_forward_{}.mat".format(self.file,self.batchnum//self.interval)
d = {}
diff_var = np.array(self.diff_var_norm)
batch_var = np.array(self.batch_var_norm)
running_var = np.array(self.running_var_norm)
d['diff_var'] = diff_var
d['running_var'] = running_var
d['batch_var'] = batch_var
io.savemat(savefile, d)
self.batch_var_norm = []
self.running_var_norm = []
self.diff_var_norm = []
def backward_hook(self,grad):
#B, T, C
grad_proj = ((grad*self.mask)*self.x).sum(dim=(0,1))/self.sum_size**0.5
#self.grad_mean_list.append(grad_mean.data.cpu().reshape([1,-1]))
#self.grad_proj_list.append(grad_proj.data.cpu().reshape([1,-1]))
self.grad_proj_list.append(grad_proj.norm().cpu().numpy().item())
#print(grad_mean.shape,grad_proj.shape);exit()
if self.batchnum%self.interval==0:
savefile = "{}_backward_{}.mat".format(self.file,self.batchnum//self.interval)
d = {}
#grad_mean_arr = torch.cat(self.grad_mean_list,dim=0)
#grad_proj_arr = torch.cat(self.grad_proj_list,dim=0)
d['grad_proj'] = np.array(self.grad_proj_list)
from scipy import io
io.savemat(savefile, d)
self.grad_proj_list = []
def loss(self):
loss = 0
assert self.training==True, "wrongly adding BN inconsistent loss!"
if self.penalty_var==0:
return loss
if self.penalty_type=='diff':
loss = self.loss_diff(loss)
if self.penalty_type=='mag':
loss = self.loss_magnitude(loss)
if self.penalty_type=='reldiffnorm':
loss = self.loss_rel_diff_norm(loss)
if self.penalty_type=='reldiffcosine':
loss = self.loss_rel_diff_cosine(loss)
if self.penalty_type=='symkl':
loss = self.sym_kl_loss(loss)
return loss
def loss_diff(self, loss):
if self.weight_affine:
loss += self.penalty_var*((torch.abs(self.running_var-self.batch_var))*self.weight.detach()).sum()
            #print(loss)  # loss: starts around 10+ at init; after a little training mostly 0.x, 1 or 2, occasionally ~9
else:
loss += self.penalty_var*(torch.abs(self.running_var-self.batch_var)).sum()
return loss
def loss_rel_diff_norm(self, loss):
loss += self.penalty_var*(torch.abs(self.running_var-self.batch_var)/(self.running_var.norm(p=1)).sum()+1)
return loss
def loss_rel_diff_cosine(self, loss):
loss -= self.penalty_var*(self.running_var*self.batch_var).sum()/self.batch_var.norm()/self.running_var.norm()
#loss -= self.penalty_var*torch.sqrt((self.running_var*self.batch_var)).sum()/torch.sqrt(self.batch_var.sum()*self.running_var.sum())
return loss
def loss_magnitude(self, loss):
loss += self.penalty_var*(torch.abs(self.batch_var)).sum()
return loss
def sym_kl_loss(self,loss):
item1 = self.running_var/(self.batch_var+self.eps)+self.batch_var/(self.running_var+self.eps)
        loss += self.penalty_var*(item1.sum())
return loss
def forward(self, input, pad_mask=None, is_encoder=False, update_run=True):
"""
input: T x B x C -> B x C x T
: B x C x T -> T x B x C
pad_mask: B x T (padding is True)
"""
T, B, C = input.shape
input = input.contiguous()
#print(input.shape) #21, 192, 512
input = input.permute(1,0,2) #T,B,C-->B,T,C
#print(pad_mask.shape,input.shape)#torch.Size([192, 21]) torch.Size([192, 21, 512])
        #print(~pad_mask);exit()  # True: masked (padding), False: not masked
if pad_mask is not None:
mask = 1-pad_mask.unsqueeze(dim=-1).type_as(input)
else:
mask = torch.ones(B,T,1).cuda()
#if not self.training:
# mask = torch.ones(B,T,1).cuda()
#else:
# print("error bn pad mask!")
# print(self.id)
# print(pad_mask)
# exit()
input = input*mask
# Compute the sum and square-sum.
sum_size = mask.sum()
#print(sum_size,input.size(0)*input.size(1),sum_size/input.size(0)/input.size(1));exit() #4032
input_ssum = (input**2).sum(dim=(0,1),keepdim=True)
# # Compute the statistics
if self.training:
self.batchnum += 1
self.mask = mask
            self.sum_size = sum_size  # effective batch size used for the statistics
bias_var = input_ssum/sum_size
            # used to compute the running statistics
self.batch_var = bias_var.squeeze()
with torch.no_grad():
stat_var = bias_var.squeeze()
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * stat_var.data
self.record_forward()
else:
bias_var = self.running_var
input = (input)/torch.sqrt(bias_var.clamp(self.eps))
if self.training:
#input.register_hook(self.backward_hook)
pass
if self.training:
self.x = input.data*mask
if self.affine:
output = input*self.weight + self.bias
else:
output = input
output = output.permute(1, 0, 2).contiguous() #B,T,C-->T,B,C
return output
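# Minimal usage sketch (not part of the original file); sizes are illustrative. Evaluation
# mode is used here so the module normalizes with its running statistics and does not try
# to dump .mat statistics files, which it does periodically during training.
if __name__ == "__main__":
    norm = MaskPowerNorm3d(num_features=8)
    norm.eval()
    x = torch.randn(5, 2, 8)                          # T x B x C
    pad_mask = torch.zeros(2, 5, dtype=torch.bool)    # B x T, True marks padding
    print(norm(x, pad_mask=pad_mask).shape)           # torch.Size([5, 2, 8])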
| 9,403 | 38.512605 | 141 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/norm/mask_anchornorm.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
from scipy import io
__all__ = ['MaskAnchorNorm']
def min_len(tensor):
#tensor: [T,B,C]
tokens = (tensor!=0).all(dim=-1)
length = tokens.sum(dim=0).long()
return length.min()
class MaskAnchorNorm(nn.Module):
"""
    An implementation of anchor normalization: mean and variance are estimated from a
    small window of time steps (starting at `position`) and applied to the whole sequence.
"""
__count = 0
def update_cnt(self):
MaskAnchorNorm.__count += 1
def __init__(self, num_features, eps=1e-5, momentum=0.05, affine=True, position=0,
num=1, with_seq = False, prefix = 'None'):
super().__init__()
self.id = self.__count
self.prefix = prefix
self.position = position
self.num = num
#print(self.id)
self.update_cnt()
self.num_features = num_features
self.eps = eps
self.affine = affine
self.momentum = momentum
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.fill_(1)
def extra_repr(self):
return '{num_features}, eps={eps}, affine={affine}'.format(**self.__dict__)
def forward(self, input, pad_mask=None, is_encoder=False):
"""
input: T x B x C -> B x C x T
: B x C x T -> T x B x C
pad_mask: B x T (padding is True)
"""
T, B, C = input.shape
num = min(min_len(input)-self.position,self.num)
#input = input.contiguous()
if self.training:
data = input[self.position:self.position+num,:,:]
var, mean = torch.var_mean(data,dim=(0,1))
with torch.no_grad():
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * var.data
else:
mean, var = self.running_mean, self.running_var
input = (input - mean)/torch.sqrt(var.clamp(self.eps))
if self.affine:
output = input*self.weight + self.bias
else:
output = input
return output
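# Minimal usage sketch (not part of the original file); sizes are illustrative. Statistics
# come from `num` time steps starting at `position`; running estimates are updated in
# training mode and reused at evaluation time.
if __name__ == "__main__":
    norm = MaskAnchorNorm(num_features=8, position=0, num=2)
    norm.eval()                       # normalize with the (default) running statistics
    x = torch.randn(6, 3, 8)          # T x B x C
    print(norm(x).shape)              # torch.Size([6, 3, 8])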
| 2,951 | 32.168539 | 103 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/norm/mask_groupnorm.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : groupnorm.py
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
def tile(a, repeats, dim):
"""
Substitute for numpy's repeat function. Taken from https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853/2
torch.repeat([1,2,3], 2) --> [1, 2, 3, 1, 2, 3]
np.repeat([1,2,3], repeats=2, axis=0) --> [1, 1, 2, 2, 3, 3]
:param a: tensor
:param repeats: number of repeats
:param dim: dimension where to repeat
:return: tensor with repitions
"""
init_dim = a.size(dim)
repeat_idx = [1] * a.dim()
repeat_idx[dim] = repeats
a = a.repeat(*(repeat_idx))
if a.is_cuda: # use cuda-device if input was on cuda device already
order_index = torch.cuda.LongTensor(
torch.cat([init_dim * torch.arange(repeats, device=a.device) + i for i in range(init_dim)]))
else:
order_index = torch.LongTensor(torch.cat([init_dim * torch.arange(repeats) + i for i in range(init_dim)]))
return torch.index_select(a, dim, order_index)
class GroupNorm(nn.Module):
r"""Applies Group Normalization over a mini-batch of inputs as described in
the paper `Group Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The input channels are separated into :attr:`num_groups` groups, each containing
``num_channels / num_groups`` channels. The mean and standard-deviation are calculated
separately over the each group. :math:`\gamma` and :math:`\beta` are learnable
per-channel affine transform parameter vectors of size :attr:`num_channels` if
:attr:`affine` is ``True``.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, C, *)` where :math:`C=\text{num\_channels}`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = nn.GroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = nn.GroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = nn.GroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
.. _`Group Normalization`: https://arxiv.org/abs/1803.08494
"""
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight',
'bias']
def __init__(self, num_channels, num_groups=2, eps=1e-5, affine=True, subtract_type="mean", robust_mean=0, robust_std=0):
super(GroupNorm, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.group_feature = num_channels // num_groups
self.subtract_type = subtract_type
self.robust_mean = robust_mean
self.robust_std = robust_std
assert subtract_type in ['mean','none','median'], "wrong subtract_type!"
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.Tensor(num_channels))
self.bias = nn.Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
init.ones_(self.weight)
#init.zeros_(self.weight)
init.zeros_(self.bias)
def forward(self, input, pad_mask=None, is_encoder=False):
        # input: only reduce over the C dim.
shaped_input = (len(input.shape) == 2)
if shaped_input:
input = input.unsqueeze(0)
T, B, C = input.shape
# Permute the mask_input to (B, T, C)
# mask_input = input.transpose(0, 1)
# # Compute the mean, var for LN, size to be BxTx1 -> BxCxT
# # Split the mask_input into group
# gn_input = mask_input.view(B, T, self.num_groups, self.group_feature)
# gn_input = gn_input.permute(1, 2, 3, 0).contiguous().view(T, self.num_groups, self.group_feature * B)
# # TxGx1 -> TxC -> BxTxC -> BxCxT
# mean_gn = tile(gn_input.mean(-1, keepdim=True).squeeze(-1), self.group_feature, -1).expand_as(mask_input).transpose(1, 2)
# var_gn = tile(gn_input.var(-1, keepdim=True).squeeze(-1), self.group_feature, -1).expand_as(mask_input).transpose(1, 2)
#
# # Resize the input to (B, C, -1).
# input = input.permute(1, 2, 0).contiguous()
# input_shape = input.size()
# input = input.view(input.size(0), self.num_channels, -1)
#
# input = (input - mean_gn) / (var_gn + self.eps).sqrt()
# input = input * (self.weight).unsqueeze(-1) + (self.bias).unsqueeze(-1)
# input = input.view(B, C, T)
# input = input.permute(2, 0, 1).contiguous()
# return input
input = input.contiguous().view(-1, self.group_feature)
'''
input = input.contiguous().view(-1, self.group_num, self.group_feature)
std, subtract_term = torch.std_mean(input,dim=2,unbiased=False,keepdim=True)
if self.subtract_type=='median':
subtract_term, _ = torch.median(input,dim=2,keepdim=True)
#var_of_mean = torch.var(subtract_term,dim=1,unbiased=False,keepdim=True)
if self.robust_mean==1:
subtract_term, _ = torch.median(subtract_term,dim=1,keepdim=True) #robust mean
std = torch.std(input,dim=(1,2),unbiased=False,keepdim=True)
#if self.robust_std==1:
            # std, _ = torch.median(std,dim=1,keepdim=True) #robust std  # possibly also missing a term: the variance of the conditional means
#std = (std**2+var_of_mean**2)**0.5
if self.subtract_type!='none':
input = input-subtract_term
        input = input/std # problematic!
'''
mean = input.mean(dim=1,keepdim=True)
#var = ((input-mean) ** 2).mean(dim=2,keepdim=True)+self.eps
var = input.var(dim=1,keepdim=True)+self.eps
#s = torch.sqrt(var)
#inv_std = var**-0.5
##inv_std = 1/torch.sqrt(var)
#output = (input - mean) * inv_std
input = (input - mean) / torch.sqrt(var)
        ## the case of median followed by mean is not handled for now
input = input.contiguous().view(-1, C)
if self.affine:
input = input * (self.weight).unsqueeze(0) + (self.bias).unsqueeze(0)
#input = F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
input = input.contiguous().view(T, B, C)
if shaped_input:
input = input.squeeze(0)
return input
def extra_repr(self):
return '{num_groups}, {num_channels}, eps={eps}, ' \
'affine={affine}, subtract_type={subtract_type}'.format(**self.__dict__)
| 7,391 | 43 | 131 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/norm/mask_layernorm3d.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import numpy as np
from scipy import io
__all__ = ['MaskLayerNorm3d']
class MaskLayerNorm3d(nn.Module):
"""
    An implementation of masked layer normalization (normalizing over the channel
    dimension), used for testing the numerical stability.
"""
__count = 0
def update_cnt(self):
MaskLayerNorm3d.__count += 1
def __init__(self, num_features, eps=1e-5, \
affine=True, with_seq = False, prefix = 'None'):
super().__init__()
self.id = self.__count
self.prefix = prefix
self.update_cnt()
self.num_features = num_features
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def extra_repr(self):
return '{num_features}, eps={eps}, affine={affine}'.format(**self.__dict__)
def forward(self, input, pad_mask=None):
"""
input: T x B x C -> B x C x T
: B x C x T -> T x B x C
pad_mask: B x T (padding is True)
"""
#print(input.shape)#torch.Size([26, 2, 128])
T, B, C = input.shape
#input = input.contiguous()
# construct the mask_input, size to be (BxL) x C: L is the real length here
# Compute the statistics
mean = input.mean(dim=2,keepdim=True)
#var = ((input-mean) ** 2).mean(dim=2,keepdim=True)+self.eps
var = input.var(dim=2,keepdim=True)+self.eps
output = (input - mean) / torch.sqrt(var)
if self.affine:
output = output*self.weight + self.bias
return output
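# Usage sketch (editorial; shapes follow the docstring above):
#   ln = MaskLayerNorm3d(num_features=128)
#   x = torch.randn(26, 2, 128)   # (T, B, C)
#   y = ln(x)                     # normalized over the C dimension, same shape as x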
| 2,211 | 32.515152 | 83 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/norm/mask_groupscale.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskPowerNorm.py
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
__all__ = ['MaskGroupScale']
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
class GroupScaling1D(nn.Module):
r"""Scales inputs by the second moment for the entire layer.
"""
def __init__(self, eps=1e-5, group_num=4):
super(GroupScaling1D, self).__init__()
self.eps = eps
self.group_num = group_num
def extra_repr(self):
return f'eps={self.eps}, group={self.group_num}'
def forward(self, input):
# calculate second moment
# different group use different mean
T, B, C = input.shape[0], input.shape[1], input.shape[2]
Cg = C // self.group_num
gn_input = input.contiguous().reshape(T, B, self.group_num, Cg)
moment2 = torch.repeat_interleave( torch.mean(gn_input * gn_input, dim=3, keepdim=True), \
repeats=Cg, dim=-1).contiguous().reshape(T, B, C)
# divide out second moment
return input / torch.sqrt(moment2 + self.eps)
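# Quick check of the scaling above (editorial sketch):
#   gs = GroupScaling1D(group_num=4)
#   x = torch.randn(10, 2, 512)           # (T, B, C), groups of 512 // 4 = 128 channels
#   y = gs(x)
#   (y[:, :, :128] ** 2).mean(dim=-1)     # ~1 for every (t, b) position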
def _unsqueeze_ft(tensor):
"""add new dimensions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
class PowerFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weight, bias, running_phi, eps, afwd, abkw, ema_gz, \
debug, warmup_iters, current_iter, mask_x):
ctx.eps = eps
ctx.debug = debug
current_iter = current_iter.item()
ctx.current_iter = current_iter
ctx.warmup_iters = warmup_iters
ctx.abkw = abkw
rmax = 1
N, C, H, W = x.size()
x2 = (mask_x * mask_x).mean(dim=0)
var = x2.reshape(1, C, 1, 1)
if current_iter <= warmup_iters:
z = x /(var + eps).sqrt()
else:
z = x /(running_phi + eps).sqrt()
y = z
ctx.save_for_backward(z, var, weight, ema_gz)
if current_iter < warmup_iters:
running_phi.copy_(running_phi * (current_iter-1)/current_iter + var.mean(dim=0, keepdim=True)/current_iter)
running_phi.copy_(afwd*running_phi + (1-afwd)*var.mean(dim=0, keepdim=True))
y = weight.reshape(1,C,1,1) * y + bias.reshape(1,C,1,1)
return y
@staticmethod
def backward(ctx, grad_output):
eps = ctx.eps
debug = ctx.debug
current_iter = ctx.current_iter
warmup_iters = ctx.warmup_iters
abkw = ctx.abkw
N, C, H, W = grad_output.size()
z, var, weight, ema_gz = ctx.saved_variables
y = z
g = grad_output * weight.reshape(1, C, 1, 1)
g = g * 1
gz = (g * z).mean(dim=3).mean(dim=2).mean(dim=0)
approx_grad_g = (g - (1 - abkw) * ema_gz * z)
ema_gz.add_((approx_grad_g * z).mean(dim=3, keepdim=True).mean(dim=2, keepdim=True).mean(dim=0, keepdim=True))
gx = 1. / torch.sqrt(var + eps) * approx_grad_g
return gx, (grad_output * y).sum(dim=3).sum(dim=2).sum(dim=0), grad_output.sum(dim=3).sum(dim=2).sum(dim=0), \
None, None, None, None, None, None, None, None, None, None
class MaskGroupScale(nn.Module):
"""
    An implementation of masked group scaling (dividing activations by their
    per-group second moment), used for testing the numerical stability.
"""
def __init__(self, num_features, eps=1e-5, alpha_fwd=0.9, alpha_bkw=0.9, \
affine=True, warmup_iters=4000, group_num=1):
super().__init__()
self.num_features = num_features
self.eps = eps
self.affine = affine
self.register_parameter('weight', nn.Parameter(torch.ones(num_features)))
self.register_parameter('bias', nn.Parameter(torch.zeros(num_features)))
self.register_buffer('running_phi', torch.ones(1,num_features,1,1))
self.register_buffer('ema_gz', torch.zeros(1,num_features,1,1))
self.register_buffer('iters', torch.zeros(1).type(torch.LongTensor))
self.afwd = alpha_fwd
self.abkw = alpha_bkw
self.eps = eps
self.debug = False
self.warmup_iters = warmup_iters
self.gp = GroupScaling1D(group_num=group_num)
self.group_num = group_num
def extra_repr(self):
return '{num_features}, eps={eps}, alpha_fwd={afwd}, alpha_bkw={abkw}, ' \
'affine={affine}, warmup={warmup_iters}, group_num={group_num}'.format(**self.__dict__)
def forward(self, input, pad_mask=None, is_encoder=False):
"""
input: T x B x C -> B x C x T
: B x C x T -> T x B x C
pad_mask: B x T (padding is True)
"""
shaped_input = (len(input.shape) == 2)
if shaped_input:
input = input.unsqueeze(0)
T, B, C = input.shape
input = self.gp(input)
return input
# construct the mask_input, size to be (BxL) x C: L is the real length here
if pad_mask is None:
mask_input = input.clone()
else:
# Transpose the bn_mask (B x T -> T x B)
bn_mask = ~pad_mask
bn_mask = bn_mask.transpose(0, 1)
if pad_mask is not None:
pad_size = (~bn_mask).sum()
mask_input = input[bn_mask, :]
else:
mask_input = input.clone()
mask_input = mask_input.reshape(-1, self.num_features)
input = input.permute(1, 2, 0).contiguous()
input_shape = input.size()
input = input.reshape(input.size(0), self.num_features, -1)
input = input.unsqueeze(-1)
if self.training:
self.iters.copy_(self.iters + 1)
output = PowerFunction.apply(input, self.weight, self.bias, self.running_phi, self.eps, \
self.afwd, self.abkw, self.ema_gz, self.debug, self.warmup_iters, self.iters, mask_input)
else:
N, C, H, W = input.size()
var = self.running_phi
output = input / (var + self.eps).sqrt()
output = self.weight.reshape(1,C,1,1) * output + self.bias.reshape(1,C,1,1)
output = output.reshape(input_shape)
output = output.permute(2, 0, 1).contiguous()
# Reshape it.
if shaped_input:
output = output.squeeze(0)
return output
| 6,418 | 34.076503 | 119 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/norm/mask_identity.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
__all__ = ['MaskIdentityNorm']
class MaskIdentityNorm(nn.Module):
"""
"""
def __init__(self, num_features,affine=False):
super().__init__()
self.num_features = num_features
self.affine = affine
def extra_repr(self):
        return '{num_features}, ' \
'affine={affine}'.format(**self.__dict__)
def forward(self, input, pad_mask=None, is_encoder=False):
"""
input: T x B x C -> B x C x T
: B x C x T -> T x B x C
pad_mask: B x T (padding is True)
"""
return input
| 790 | 23.71875 | 62 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/norm/mask_batchnorm3d.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : MaskBatchNorm.py
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import numpy as np
from scipy import io
__all__ = ['MaskBatchNorm3d']
class MaskBatchNorm3d(nn.Module):
"""
An implementation of masked batch normalization, used for testing the numerical
stability.
"""
__count = 0
def update_cnt(self):
MaskBatchNorm3d.__count += 1
def __init__(self, num_features, eps=1e-5, momentum=0.05, \
affine=True, track_running_stats=True, sync_bn=True, process_group=None, \
with_seq = True, prefix = 'None', weight_affine=0, penalty_mean=0, penalty_var=0, \
penalty_type='diff', learn_alpha=False, alpha=1, normalize=True):
super().__init__()
self.id = self.__count
self.prefix = prefix
#print(self.id)
self.update_cnt()
self.interval = 1100 #standard: 1100
#self.save_interval = 20
self.batchnum = 0
self.num_features = num_features
self.eps = eps
self.momentum = momentum #default0.05
self.affine = affine
self.track_running_stats = track_running_stats
self.maxlen = 600
self.with_seq = with_seq
self.learn_alpha = learn_alpha
self.normalize = normalize
if self.learn_alpha:
self.alpha = Parameter(torch.Tensor(1))
nn.init.constant_(self.alpha, alpha)
self.weight_affine = weight_affine
self.penalty_mean = penalty_mean
self.penalty_var = penalty_var
self.penalty_type = penalty_type
assert self.penalty_type in ['diff','diff2','reldiff','mag','reldiffnorm','reldiffcosine','symkl'], "wrong penalty type for BN!"
#self.running_mean_list = []
#self.running_var_list = []
#self.batch_mean_list = []
#self.batch_var_list = []
self.batch_mean_norm = []
self.batch_var_norm = []
self.batch_sigma_norm = []
self.running_mean_norm = []
self.running_var_norm = []
self.running_sigma_norm = []
self.diff_mean_norm = []
self.diff_var_norm = []
self.diff_sigma_norm = []
self.mean_tid_list = []
self.var_tid_list = []
self.grad_mean_list = []
self.grad_proj_list = []
self.exp_var_list = []
self.var_exp_list = []
self.shape_list = []
self.file = 'statistics/{}/bn_{}'.format(self.prefix,self.id)
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
if self.track_running_stats:
self.register_buffer('running_mean', torch.zeros(num_features))
#self.register_buffer('momentum_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
#self.register_buffer('momentum_var', torch.ones(num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
'''
self.register_buffer('running_mean', torch.zeros(self.maxlen,num_features))
self.register_buffer('running_var', torch.ones(self.maxlen,num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
'''
else:
self.register_parameter('running_mean', None)
self.register_parameter('running_var', None)
self.register_parameter('num_batches_tracked', None)
self.sync_bn = sync_bn
# gpu_size is set through DistributedDataParallel initialization. This is to ensure that SyncBatchNorm is used
# under supported condition (single GPU per process)
self.process_group = process_group
self.ddp_gpu_size = 4
self.reset_parameters()
def reset_running_stats(self):
if self.track_running_stats:
self.running_mean.zero_()
self.running_var.fill_(1)
self.num_batches_tracked.zero_()
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
def record_forward(self):
diff_mean_data = (self.running_mean-self.batch_mean).data
diff_var_data = (self.running_var-self.batch_var).data
self.running_sigma, self.batch_sigma = torch.sqrt(self.running_var.data), torch.sqrt(self.batch_var.data)
var_tid = (self.running_sigma/self.batch_sigma-1).abs().mean()
mean_tid = (diff_mean_data/self.batch_sigma).abs().mean()
self.mean_tid_list.append(mean_tid.cpu().numpy().item())
self.var_tid_list.append(var_tid.cpu().numpy().item())
diff_sigma_data = (self.running_sigma-self.batch_sigma).data
self.diff_mean_norm.append(diff_mean_data.norm().cpu().numpy().item())
self.diff_var_norm.append(diff_var_data.norm().cpu().numpy().item())
self.diff_sigma_norm.append(diff_sigma_data.norm().cpu().numpy().item())
self.batch_mean_norm.append(self.batch_mean.norm().cpu().numpy().item())
self.batch_var_norm.append(self.batch_var.norm().cpu().numpy().item())
self.batch_sigma_norm.append(self.batch_sigma.norm().cpu().numpy().item())
self.running_mean_norm.append(self.running_mean.norm().cpu().numpy().item())
self.running_var_norm.append(self.running_var.norm().cpu().numpy().item())
self.running_sigma_norm.append(self.running_sigma.norm().cpu().numpy().item())
self.exp_var_list.append(self.exp_var.norm().cpu().numpy().item())
self.var_exp_list.append(self.var_exp.norm().cpu().numpy().item())
self.shape_list.append(self.shape[:2])
#print(self.batch_var[:5])
#print(self.exp_var[:5])
#print(self.var_exp[:5])
#exit()
if self.batchnum%self.interval==0:
savefile = "{}_forward_{}.mat".format(self.file,self.batchnum//self.interval)
d = {}
diff_mean = np.array(self.diff_mean_norm)
diff_var = np.array(self.diff_var_norm)
diff_sigma = np.array(self.diff_sigma_norm)
batch_mean = np.array(self.batch_mean_norm)
batch_var = np.array(self.batch_var_norm)
batch_sigma = np.array(self.batch_sigma_norm)
running_mean = np.array(self.running_mean_norm)
running_var = np.array(self.running_var_norm)
running_sigma = np.array(self.running_sigma_norm)
mean_tid = np.array(self.mean_tid_list)
var_tid = np.array(self.var_tid_list)
exp_var = np.array(self.exp_var_list)
var_exp = np.array(self.var_exp_list)
shape = np.array(self.shape_list)
d['diff_mean'] = diff_mean
d['diff_var'] = diff_var
d['diff_sigma'] = diff_sigma
d['running_mean'] = running_mean
d['running_var'] = running_var
d['running_sigma'] = running_sigma
d['batch_mean'] = batch_mean
d['batch_var'] = batch_var
d['batch_sigma'] = batch_sigma
d['mean_tid'] = mean_tid
d['var_tid'] = var_tid
d['exp_var'] = exp_var
d['var_exp'] = var_exp
d['shape'] = shape
io.savemat(savefile, d)
self.batch_mean_norm = []
self.batch_var_norm = []
self.batch_sigma_norm = []
self.running_mean_norm = []
self.running_var_norm = []
self.running_sigma_norm = []
self.diff_mean_norm = []
self.diff_var_norm = []
self.diff_sigma_norm = []
self.mean_tid_list = []
self.var_tid_list = []
self.exp_var_list = []
self.var_exp_list = []
self.shape_list = []
def backward_hook(self,grad):
#B, T, C
grad_mean = (grad*self.mask).sum(dim=(0,1))/self.sum_size**0.5
grad_proj = ((grad*self.mask)*self.x).sum(dim=(0,1))/self.sum_size**0.5
#self.grad_mean_list.append(grad_mean.data.cpu().reshape([1,-1]))
#self.grad_proj_list.append(grad_proj.data.cpu().reshape([1,-1]))
self.grad_mean_list.append(grad_mean.norm().cpu().numpy().item())
self.grad_proj_list.append(grad_proj.norm().cpu().numpy().item())
#print(grad_mean.shape,grad_proj.shape);exit()
if self.batchnum%self.interval==0:
savefile = "{}_backward_{}.mat".format(self.file,self.batchnum//self.interval)
d = {}
#grad_mean_arr = torch.cat(self.grad_mean_list,dim=0)
#grad_proj_arr = torch.cat(self.grad_proj_list,dim=0)
d['grad_mean'] = np.array(self.grad_mean_list)
d['grad_proj'] = np.array(self.grad_proj_list)
from scipy import io
io.savemat(savefile, d)
self.grad_mean_list = []
self.grad_proj_list = []
def loss(self):
loss = 0
assert self.training==True, "wrongly adding BN inconsistent loss!"
if self.penalty_mean==0 and self.penalty_var==0:
return loss
if self.penalty_type=='diff':
loss = self.loss_diff(loss)
if self.penalty_type=='diff2':
loss = self.loss_diff2(loss)
if self.penalty_type=='reldiff':
loss = self.loss_rel_diff(loss)
if self.penalty_type=='mag':
loss = self.loss_magnitude(loss)
if self.penalty_type=='reldiffnorm':
loss = self.loss_rel_diff_norm(loss)
if self.penalty_type=='reldiffcosine':
loss = self.loss_rel_diff_cosine(loss)
if self.penalty_type=='symkl':
loss = self.sym_kl_loss(loss)
return loss
def loss_diff(self, loss):
if self.weight_affine:
loss += self.penalty_mean*(((self.running_mean-self.batch_mean)**2)*self.weight.detach()).sum()
loss += self.penalty_var*((torch.abs(self.running_var-self.batch_var))*self.weight.detach()).sum()
            #print(loss) # loss: starts around 10+; after a little training mostly 0.x-2, occasionally ~9
else:
loss += self.penalty_mean*((self.running_mean-self.batch_mean)**2).sum()
loss += self.penalty_var*(torch.abs(self.running_var-self.batch_var)).sum()
return loss
def loss_diff2(self, loss):
self.running_sigma, self.batch_sigma = torch.sqrt(self.running_var), torch.sqrt(self.batch_var)
loss += self.penalty_mean*((self.running_mean-self.batch_mean)**2).sum()
loss += self.penalty_var*((self.running_sigma-self.batch_sigma)**2).sum()
return loss
def loss_rel_diff(self, loss):
self.running_sigma, self.batch_sigma = torch.sqrt(self.running_var), torch.sqrt(self.batch_var)
loss += self.penalty_mean*(((self.running_mean-self.batch_mean)/self.running_sigma)**2).sum()
loss += self.penalty_var*((1-self.batch_sigma/self.running_sigma)**2).sum()
return loss
def loss_rel_diff_norm(self, loss):
loss += self.penalty_mean*(((self.running_mean-self.batch_mean)/self.running_mean.norm())**2).sum()
loss += self.penalty_var*(torch.abs(self.running_var-self.batch_var)/self.running_var.norm(p=1)).sum()
return loss
def loss_rel_diff_cosine(self, loss):
loss -= self.penalty_mean*(self.running_mean*self.batch_mean).sum()/self.batch_mean.norm()/self.running_mean.norm()
loss -= self.penalty_var*(self.running_var*self.batch_var).sum()/self.batch_var.norm()/self.running_var.norm()
#loss -= self.penalty_var*torch.sqrt((self.running_var*self.batch_var)).sum()/torch.sqrt(self.batch_var.sum()*self.running_var.sum())
return loss
def loss_magnitude(self, loss):
loss += self.penalty_mean*((self.batch_mean)**2).sum()
loss += self.penalty_var*(torch.abs(self.batch_var)).sum()
return loss
def sym_kl_loss(self,loss):
item1 = self.running_var/(self.batch_var+self.eps)+self.batch_var/(self.running_var+self.eps)
item2 = ((self.running_mean-self.batch_mean)**2)*(1/(self.batch_var+self.eps)+1/(self.running_var+self.eps))
loss += self.penalty_mean*(item1.sum()+item2.sum())
return loss
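    # Editorial note: up to constants, sym_kl_loss above is the symmetric KL divergence between
    # the diagonal Gaussians N(batch_mean, batch_var) and N(running_mean, running_var):
    #   sum_c [ v_run/v_batch + v_batch/v_run + (m_run - m_batch)^2 * (1/v_batch + 1/v_run) ]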
def forward(self, input, pad_mask=None, is_encoder=False, update_run=True):
"""
input: T x B x C -> B x C x T
: B x C x T -> T x B x C
pad_mask: B x T (padding is True)
"""
T, B, C = input.shape
#if self.id==0:
# print(input.shape)
input = input.contiguous()
#print(input.shape) #21, 192, 512
input = input.permute(1,0,2) #T,B,C-->B,T,C
#print(pad_mask.shape,input.shape)#torch.Size([192, 21]) torch.Size([192, 21, 512])
        #print(~pad_mask);exit() # True: masked (padding), False: not masked
if pad_mask is not None:
mask = 1-pad_mask.unsqueeze(dim=-1).type_as(input)
else:
mask = torch.ones(B,T,1).cuda()
#if not self.training:
# mask = torch.ones(B,T,1).cuda()
#else:
# print("error bn pad mask!")
# print(self.id)
# print(pad_mask)
# exit()
input = input*mask
# Compute the sum and square-sum.
sum_size = mask.sum()
#print(sum_size,input.size(0)*input.size(1),sum_size/input.size(0)/input.size(1));exit() #4032
input_sum = input.sum(dim=(0,1),keepdim=True)
input_ssum = (input**2).sum(dim=(0,1),keepdim=True)
# # Compute the statistics
if self.training:
self.shape = [B,T,C]
self.L_sum = input.sum(dim=1, keepdim=True)
self.L_ssum = (input**2).sum(dim=1, keepdim=True)
self.L_sum_size = mask.sum(dim=1, keepdim=True)
self.L_mean = self.L_sum/self.L_sum_size
self.L_m2 = self.L_ssum/self.L_sum_size
self.L_var = self.L_m2-self.L_mean**2
self.exp_var = self.L_var.mean(dim=(0,1))
self.var_exp = self.L_mean.var(dim=(0,1),unbiased=False)
self.batchnum += 1
self.mask = mask
            self.sum_size = sum_size # effective batch size used for the statistics
mean = input_sum/sum_size
bias_var = input_ssum/sum_size-mean**2
#if self.learn_alpha:
# mean = self.alpha*mean
# bias_var = bias_var+((self.alpha-1)*mean)**2
            # used to compute the running statistics
self.batch_mean = mean.squeeze()
self.batch_var = bias_var.squeeze()
with torch.no_grad():
stat_mean = mean.squeeze()
stat_var = bias_var.squeeze()
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * stat_mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * stat_var.data
#self.momentum_mean = (1 - self.momentum) * self.momentum_mean + self.momentum * (stat_mean.data-self.running_mean)
#self.momentum_var = (1 - self.momentum) * self.momentum_var + self.momentum * (stat_var.data-self.running_var)
#self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * self.momentum_mean
#self.running_var = (1 - self.momentum) * self.running_var + self.momentum * self.momentum_var
self.record_forward()
else:
mean, bias_var = self.running_mean, self.running_var
if self.normalize:
input = (input - mean)/torch.sqrt(bias_var.clamp(self.eps))
#x=(input-mean)/sigma
if self.training:
#input.register_hook(self.backward_hook)
pass
if self.training:
self.x = input.data*mask
if self.affine:
output = input*self.weight + self.bias
else:
output = input
output = output.permute(1, 0, 2).contiguous() #B,T,C-->T,B,C
return output
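# Usage sketch (editorial; pad_mask marks padded positions with True, as in the docstring):
#   bn = MaskBatchNorm3d(num_features=512, penalty_mean=0.01, penalty_var=0.01)
#   y = bn(x, pad_mask=pad_mask)              # x: (T, B, C), pad_mask: (B, T)
#   extra = bn.loss() if bn.training else 0.  # consistency penalty added to the task loss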
| 16,604 | 41.686375 | 141 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/dynamicconv_layer/cuda_function_gen.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
blocks = [32, 64, 128, 256]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
std::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = weight.size(1);
const auto filterSize = weight.size(2);
const auto numFiltersInBlock = numFeatures / numHeads;
const dim3 blocks(minibatch, numFeatures);
auto output = at::zeros_like(input);
auto stream = at::cuda::getCurrentCUDAStream();
"""
switch = """
switch(filterSize) {
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {pad}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{
dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
output.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
}
break;\n
"""
end = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
}
return {output};
}
"""
with open("dynamicconv_cuda_forward.cu", 'w') as forward:
forward.write(head)
forward.write(switch)
for k in kernels:
b_size = 32
for b in blocks:
if b > k:
b_size = b
break
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=b_size, pad=pad))
forward.write(bad_padding)
forward.write(end)
def gen_backward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
thresh = [512, 512, 512, 512, 512, 380, 256, 256]
min_block = [64, 64, 64, 64, 64, 64, 128, 256]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = weight.size(1);
const auto filterSize = weight.size(2);
const auto numFiltersInBlock = numFeatures / numHeads;
auto numChunks = 1;
auto gradInput = at::zeros_like(input);
auto gradWeight = at::zeros_like(weight);
auto stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(minibatch, numHeads, numChunks);
"""
sequence_if = """
if (sequenceLength < {seq}) {{
switch(filterSize) {{
"""
case_k = """
case {k}:
"""
chunks_reset = """
numChunks = int(ceilf(sequenceLength/float({b_size})));
blocks = dim3(minibatch, numHeads, numChunks);
"""
main_block = """
if (padding_l == {p}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{
dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;\n
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
last_return = """
}
return {gradInput, gradWeight};
}
"""
with open("dynamicconv_cuda_backward.cu", 'w') as backward:
backward.write(head)
for seq in seqs:
backward.write(sequence_if.format(seq=seq))
for k, t, m in zip(kernels, thresh, min_block):
backward.write(case_k.format(k=k))
if seq <= t:
b_size = seq
else:
b_size = m
backward.write(chunks_reset.format(b_size=b_size))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=b_size, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(con_else)
backward.write(final_else)
for k, m in zip(kernels, min_block):
backward.write(case_k.format(k=k))
backward.write(chunks_reset.format(b_size=m))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=m, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(last_return)
if __name__ == "__main__":
gen_forward()
gen_backward()
| 6,866 | 29.65625 | 126 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/dynamicconv_layer/setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
setup(
name='dynamicconv_layer',
ext_modules=[
CUDAExtension(
name='dynamicconv_cuda',
sources=[
'dynamicconv_cuda.cpp',
'dynamicconv_cuda_kernel.cu',
],
),
],
cmdclass={
'build_ext': BuildExtension
})
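# Typical build step (editorial sketch; needs a CUDA toolchain compatible with the installed PyTorch):
#   python setup.py build_ext --inplace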
| 613 | 24.583333 | 67 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/dynamicconv_layer/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .dynamicconv_layer import DynamicconvLayer # noqa
| 234 | 32.571429 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.autograd import Function
import torch.nn.functional as F
import dynamicconv_cuda
from fairseq import utils
from fairseq.modules.unfold import unfold1d
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
class dynamicconvFunction(Function):
@staticmethod
def forward(ctx, x, weights, padding_l):
ctx.padding_l = padding_l
outputs = dynamicconv_cuda.forward(x, weights, padding_l)
variables = [x, weights]
ctx.save_for_backward(*variables)
return outputs[0]
@staticmethod
def backward(ctx, grad_output):
outputs = dynamicconv_cuda.backward(
grad_output.contiguous(),
ctx.padding_l,
*ctx.saved_tensors)
grad_input, grad_weights = outputs
return grad_input, grad_weights, None
@with_incremental_state
class DynamicconvLayer(nn.Module):
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
weight_softmax=False,
num_heads=1,
weight_dropout=0.,
bias=False,
renorm_padding=False,
conv_bias=False,
query_size=None,
):
super(DynamicconvLayer, self).__init__()
self.input_size = input_size
self.query_size = input_size if query_size is None else query_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_softmax = weight_softmax
self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__)
self.renorm_padding = renorm_padding
self.bias = bias
self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias)
if conv_bias:
self.conv_bias = nn.Parameter(torch.Tensor(input_size))
else:
self.conv_bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight_linear.weight)
if self.conv_bias is not None:
nn.init.constant_(self.conv_bias, 0.)
            nn.init.constant_(self.weight_linear.bias, 0.)
def forward(self, x, incremental_state=None, query=None, unfold=None):
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
# R = C // H
# during inference time, incremental BMM is faster
if incremental_state is not None:
unfold = x.size(0) > 512 if unfold is None else unfold # use unfold mode as default for long sequence to save memory
unfold = unfold or (incremental_state is not None)
assert query is None
if query is None:
query = x
if unfold:
output = self._forward_unfolded(x, incremental_state, query)
else:
output = self._forward_expanded(x, incremental_state, query)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
# during training time, use CUDA kernel
else:
weight = self.weight_linear(x).view(T, B, H, K)
if self.weight_softmax:
weight = F.softmax(weight, dim=-1)
if self.weight_dropout_module.p:
weight = self.weight_dropout_module(weight)
weight = weight.permute(1, 2, 3, 0).contiguous()
self.filters = weight
x = x.permute(1, 2, 0).contiguous()
output = dynamicconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, 'input_buffer')
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
def _forward_unfolded(self, x, incremental_state, query):
'''The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right.'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight_linear(query).view(T*B*H, -1)
# renorm_padding is only implemented in _forward_expanded
assert not self.renorm_padding or incremental_state is not None
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
x_unfold = x_unfold.view(T*B*H, R, -1)
else:
padding_l = self.padding_l
if K > T and padding_l == K-1:
weight = weight.narrow(1, K-T, T)
K, padding_l = T, T-1
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, K, padding_l, 0)
x_unfold = x_unfold.view(T*B*H, R, K)
if self.weight_softmax and not self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = weight.narrow(1, 0, K)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2):]
K = weight.size(1)
if self.weight_softmax and self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_stat, query):
'''Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight_linear(query).view(T*B*H, -1)
if not self.renorm_padding:
if self.weight_softmax:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
weight = weight.narrow(1, 0, K).contiguous()
weight = weight.view(T, B*H, K).transpose(0, 1)
x = x.view(T, B*H, R).transpose(0, 1)
if self.weight_softmax and self.renorm_padding:
# turn the convolution filters into band matrices
weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf'))
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
# normalize the weight over valid positions like self-attention
weight_expanded = F.softmax(weight_expanded, dim=2)
weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
else:
P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
if K > T and P == K-1:
weight = weight.narrow(2, K-T, T)
K, P = T, T-1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
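# Usage sketch (editorial; the training path requires the compiled dynamicconv_cuda extension
# and CUDA tensors):
#   layer = DynamicconvLayer(input_size=512, kernel_size=3, padding_l=2,
#                            num_heads=8, weight_softmax=True).cuda()
#   x = torch.randn(20, 2, 512).cuda()   # (T, B, C)
#   y = layer(x)                         # same shape as x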
| 8,719 | 39.184332 | 129 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/fc/oni_fc.py
|
"""
Orthogonalization by Newton’s Iteration
"""
import torch.nn
import torch.nn.functional as F
from torch.nn import Parameter
from torch.autograd import Variable
from typing import List
from torch.autograd.function import once_differentiable
__all__ = ['WN_Conv2d', 'OWN_Conv2d', 'ONI_Conv2d','ONI_ConvTranspose2d',
'ONI_Linear']
# norm functions --------------------------------
class IdentityModule(torch.nn.Module):
def __init__(self, *args, **kwargs):
super(IdentityModule, self).__init__()
def forward(self, input: torch.Tensor):
return input
class WNorm(torch.nn.Module):
def forward(self, weight):
weight_ = weight.view(weight.size(0), -1)
#std = weight_.std(dim=1, keepdim=True) + 1e-5
norm = weight_.norm(dim=1, keepdim=True) + 1e-5
weight_norm = weight_ / norm
return weight_norm.view(weight.size())
class OWNNorm(torch.nn.Module):
def __init__(self, norm_groups=1, *args, **kwargs):
super(OWNNorm, self).__init__()
self.norm_groups = norm_groups
def matrix_power3(self, Input):
B=torch.bmm(Input, Input)
return torch.bmm(B, Input)
def forward(self, weight: torch.Tensor):
assert weight.shape[0] % self.norm_groups == 0
Z = weight.view(self.norm_groups, weight.shape[0] // self.norm_groups, -1) # type: torch.Tensor
Zc = Z - Z.mean(dim=-1, keepdim=True)
S = torch.matmul(Zc, Zc.transpose(1, 2))
wm = torch.randn(S.shape).to(S)
#Scales = torch.eye(S.shape[-1]).to(S).expand(S.shape)
#Us = torch.eye(S.shape[-1]).to(S).expand(S.shape)
for i in range(self.norm_groups):
U, Eig, _ = S[i].svd()
Scales = Eig.rsqrt().diag()
wm[i] = U.mm(Scales).mm(U.t())
W = wm.matmul(Zc)
#print(W.matmul(W.transpose(1,2)))
# W = oni_py.apply(weight, self.T, ctx.groups)
return W.view_as(weight)
def extra_repr(self):
fmt_str = ['OWN:']
if self.norm_groups > 1:
fmt_str.append('groups={}'.format(self.norm_groups))
return ', '.join(fmt_str)
class ONINorm(torch.nn.Module):
def __init__(self, T=5, norm_groups=1, *args, **kwargs):
super(ONINorm, self).__init__()
self.T = T
self.norm_groups = norm_groups
self.eps = 1e-5
def matrix_power3(self, Input):
B=torch.bmm(Input, Input)
return torch.bmm(B, Input)
def forward(self, weight: torch.Tensor):
assert weight.shape[0] % self.norm_groups == 0
Z = weight.view(self.norm_groups, weight.shape[0] // self.norm_groups, -1) # type: torch.Tensor
Zc = Z - Z.mean(dim=-1, keepdim=True)
S = torch.matmul(Zc, Zc.transpose(1, 2))
eye = torch.eye(S.shape[-1]).to(S).expand(S.shape)
S = S + self.eps*eye
norm_S = S.norm(p='fro', dim=(1, 2), keepdim=True)
S = S.div(norm_S)
B = [torch.Tensor([]) for _ in range(self.T + 1)]
B[0] = torch.eye(S.shape[-1]).to(S).expand(S.shape)
for t in range(self.T):
#B[t + 1] = torch.baddbmm(1.5, B[t], -0.5, torch.matrix_power(B[t], 3), S)
B[t + 1] = torch.baddbmm(1.5, B[t], -0.5, self.matrix_power3(B[t]), S)
W = B[self.T].matmul(Zc).div_(norm_S.sqrt())
#print(W.matmul(W.transpose(1,2)))
# W = oni_py.apply(weight, self.T, ctx.groups)
return W.view_as(weight)
def extra_repr(self):
fmt_str = ['T={}'.format(self.T)]
if self.norm_groups > 1:
fmt_str.append('groups={}'.format(self.norm_groups))
return ', '.join(fmt_str)
class ONINorm_colum(torch.nn.Module):
def __init__(self, T=5, norm_groups=1, *args, **kwargs):
super(ONINorm_colum, self).__init__()
self.T = T
self.norm_groups = norm_groups
self.eps = 1e-5
#print(self.eps)
def matrix_power3(self, Input):
B=torch.bmm(Input, Input)
return torch.bmm(B, Input)
def forward(self, weight: torch.Tensor):
assert weight.shape[0] % self.norm_groups == 0
Z = weight.view(self.norm_groups, weight.shape[0] // self.norm_groups, -1) # type: torch.Tensor
Zc = Z - Z.mean(dim=-1, keepdim=True)
S = torch.matmul(Zc.transpose(1, 2), Zc)
eye = torch.eye(S.shape[-1]).to(S).expand(S.shape)
S = S + self.eps*eye
norm_S = S.norm(p='fro', dim=(1, 2), keepdim=True)
#print(S.size())
#S = S.div(norm_S)
B = [torch.Tensor([]) for _ in range(self.T + 1)]
B[0] = torch.eye(S.shape[-1]).to(S).expand(S.shape)
for t in range(self.T):
#B[t + 1] = torch.baddbmm(1.5, B[t], -0.5, torch.matrix_power(B[t], 3), S)
B[t + 1] = torch.baddbmm(1.5, B[t], -0.5, self.matrix_power3(B[t]), S)
W = Zc.matmul(B[self.T]).div_(norm_S.sqrt())
#print(W.matmul(W.transpose(1,2)))
# W = oni_py.apply(weight, self.T, ctx.groups)
return W.view_as(weight)
def extra_repr(self):
fmt_str = ['T={}'.format(self.T)]
if self.norm_groups > 1:
fmt_str.append('groups={}'.format(self.norm_groups))
return ', '.join(fmt_str)
# normedConvs--------------------------------
class WN_Conv2d(torch.nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
NScale=1.414, adjustScale=False, *args, **kwargs):
super(WN_Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
print('WN_Conv:---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = WNorm()
self.scale_ = torch.ones(out_channels, 1, 1, 1).fill_(NScale)
if adjustScale:
self.WNScale = Parameter(self.scale_)
else:
# self.scale = Variable(self.scale, requires_grad=False)
self.register_buffer('WNScale', self.scale_)
def forward(self, input_f: torch.Tensor) -> torch.Tensor:
weight_q = self.weight_normalization(self.weight)
weight_q = weight_q * self.WNScale
out = F.conv2d(input_f, weight_q, self.bias, self.stride, self.padding, self.dilation, self.groups)
return out
class OWN_Conv2d(torch.nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
norm_groups=1, norm_channels=0, NScale=1.414, adjustScale=False, *args, **kwargs):
super(OWN_Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
if norm_channels > 0:
norm_groups = out_channels // norm_channels
print('OWN_Conv:----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = OWNNorm(norm_groups=norm_groups)
self.scale_ = torch.ones(out_channels, 1, 1, 1).fill_(NScale)
if adjustScale:
self.WNScale = Parameter(self.scale_)
else:
# self.scale = Variable(self.scale, requires_grad=False)
self.register_buffer('WNScale', self.scale_)
def forward(self, input_f: torch.Tensor) -> torch.Tensor:
weight_q = self.weight_normalization(self.weight)
weight_q = weight_q * self.WNScale
out = F.conv2d(input_f, weight_q, self.bias, self.stride, self.padding, self.dilation, self.groups)
return out
class ONI_Conv2d(torch.nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
T=5, norm_groups=1, norm_channels=0, NScale=1.414, adjustScale=False, ONIRow_Fix=False, *args, **kwargs):
super(ONI_Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
print('ONI channels:--OD:',out_channels, '--ID:', in_channels, '--KS',kernel_size)
if out_channels <= (in_channels*kernel_size*kernel_size):
if norm_channels > 0:
norm_groups = out_channels // norm_channels
#print('ONI_Conv_Row:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = ONINorm(T=T, norm_groups=norm_groups)
else:
if ONIRow_Fix:
# print('ONI_Conv_Row:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = ONINorm(T=T, norm_groups=norm_groups)
else:
# print('ONI_Conv_Colum:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = ONINorm_colum(T=T, norm_groups=norm_groups)
self.scale_ = torch.ones(out_channels, 1, 1, 1).fill_(NScale)
if adjustScale:
self.WNScale = Parameter(self.scale_)
else:
# self.scale = Variable(self.scale, requires_grad=False)
self.register_buffer('WNScale', self.scale_)
def forward(self, input_f: torch.Tensor) -> torch.Tensor:
weight_q = self.weight_normalization(self.weight)
weight_q = weight_q * self.WNScale
out = F.conv2d(input_f, weight_q, self.bias, self.stride, self.padding, self.dilation, self.groups)
return out
class ONI_Linear(torch.nn.Linear):
def __init__(self, in_channels, out_channels, bias=True,
T=4, norm_groups=1, norm_channels=0, NScale=1, adjustScale=False, *args, **kwargs):
super(ONI_Linear, self).__init__(in_channels, out_channels, bias)
if out_channels <= in_channels:
if norm_channels > 0:
norm_groups = out_channels // norm_channels
print('ONI_Linear_Row:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = ONINorm(T=T, norm_groups=norm_groups)
else:
print('ONI_Linear_Colum:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = ONINorm_colum(T=T, norm_groups=norm_groups)
self.scale_ = torch.ones(out_channels, 1, ).fill_(NScale)
if adjustScale:
self.WNScale = Parameter(self.scale_)
else:
# self.scale = Variable(self.scale, requires_grad=False)
self.register_buffer('WNScale', self.scale_)
def forward(self, input_f: torch.Tensor) -> torch.Tensor:
weight_q = self.weight_normalization(self.weight)
weight_q = weight_q * self.WNScale
out = F.linear(input_f, weight_q, self.bias)
return out
#Trans Conv
class ONI_ConvTranspose2d(torch.nn.ConvTranspose2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True,
T=5, norm_groups=1, NScale=1.414, adjustScale=False):
super(ONI_ConvTranspose2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, output_padding, groups, bias, dilation)
print('ONI_Column:--T=', T, '----norm_groups:', norm_groups, '---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = ONINorm(T=T, norm_groups=norm_groups)
self.scale_ = torch.ones(out_channels, 1, 1, 1).fill_(NScale)
if adjustScale:
self.scale = Parameter(self.scale_)
else:
# self.scale = Variable(self.scale, requires_grad=False)
self.register_buffer('scale', self.scale_)
def forward(self, input_f: torch.Tensor, output_size=None) -> torch.Tensor:
output_padding = self._output_padding(input_f, output_size, self.stride, self.padding, self.kernel_size)
weight_q = self.weight_normalization(self.weight)
weight_q = weight_q * self.scale
out = F.conv_transpose2d(input_f, weight_q, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)
return out
if __name__ == '__main__':
SEED = 0
torch.manual_seed(SEED)
oni_ = ONINorm(T=5, norm_groups=1)
w_ = torch.randn(4, 4, 2, 2)
print(w_)
w_.requires_grad_()
y_ = oni_(w_)
z_ = y_.view(w_.size(0), -1)
#print(z_.sum(dim=1))
print(z_.matmul(z_.t()))
# y_.sum().backward()
# print('w grad', w_.grad.size())
# conv=ONI_Conv2d(4, 2, 1, adjustScale=True)
# b = conv(w_)
# print(b)
| 12,549 | 41.398649 | 146 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/fc/dropout_fc.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['DropoutFC']
class DropoutFC(nn.Linear):
def __init__(self, in_features, out_features, bias=True, dropout=0, scale=1.0):
super(DropoutFC, self).__init__(in_features, out_features, bias)
print('DropoutFC dropout:{}, scale:{}'.format(dropout,scale))
self.weight_dropout_module = nn.Dropout(p=dropout)
nn.init.xavier_uniform_(self.weight,gain=scale)
nn.init.constant_(self.bias, 0.0)
def forward(self, input_f: torch.Tensor) -> torch.Tensor:
weight_q = self.weight_dropout_module(self.weight)
out = F.linear(input_f, weight_q, self.bias)
return out
if __name__ == '__main__':
m = DropoutFC(2,4)
w_ = torch.randn(5, 2)
w_.requires_grad_()
y_ = m(w_)
#z_ = y_.view(w_.size(0), -1)
#print(z_.norm(dim=1))
y_.sum().backward()
print('w grad', w_.grad.size())
| 940 | 28.40625 | 83 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/fc/conv.py
|
import torch
from torch import nn
import torch.nn.functional as F
class Conv1d(nn.Conv1d):
def __init__(self,in_channels, out_channels, kernel_size=3, stride=1):
self.padding = (kernel_size-1)//2
self.stride = stride
super(Conv1d, self).__init__(in_channels, out_channels, kernel_size, stride=stride,padding=self.padding)
def extra_repr(self):
        return 'in_channels={in_channels}, out_channels={out_channels}, kernel_size={kernel_size}'.format(**self.__dict__)
def forward(self,x):
#T,B,C
x = x.permute(1,2,0) #T,B,C-->B,C,T
x = F.conv1d(x, self.weight, bias=self.bias, stride=self.stride, padding=self.padding)
x = x.permute(2,0,1) #B,C,T-->T,B,C
return x
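# Usage sketch (editorial): inputs follow the fairseq (T, B, C) convention.
#   conv = Conv1d(in_channels=512, out_channels=512, kernel_size=3)
#   y = conv(torch.randn(10, 2, 512))    # (10, 2, 512); length preserved by the same-padding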
| 674 | 38.705882 | 112 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/fc/wn.py
|
import torch.nn
import torch.nn.functional as F
from torch.nn import Parameter
from torch.autograd import Variable
from typing import List
from torch.autograd.function import once_differentiable
__all__ = ['CWN']
# norm functions --------------------------------
class CWNorm(torch.nn.Module):
def forward(self, weight):
weight_ = weight.view(weight.size(0), -1)
weight_mean = weight_.mean(dim=1, keepdim=True)
weight_ = weight_ - weight_mean
norm = weight_.norm(dim=1, keepdim=True) + 1e-5
weight_CWN = weight_ / norm
return weight_CWN.view(weight.size())
# the default scale is 1.414, probably because of ReLU
class CWN(torch.nn.Linear):
def __init__(self, in_features, out_features, iscenter=True, bias=True,
NScale=1, adjustScale=False, *args, **kwargs):
super(CWN, self).__init__(in_features, out_features, bias)
print('CWN:---NScale:', NScale, '---adjust:', adjustScale)
self.weight_normalization = CWNorm()
if NScale<0:
self.scale_ = torch.norm(self.weight.data,dim=1,keepdim=True)
else:
self.scale_ = torch.ones(out_features, 1).fill_(NScale)
if adjustScale:
self.WNScale = Parameter(self.scale_)
else:
# self.scale = Variable(self.scale, requires_grad=False)
self.register_buffer('WNScale', self.scale_)
def forward(self, input_f: torch.Tensor) -> torch.Tensor:
weight_q = self.weight_normalization(self.weight)
weight_q = weight_q * self.WNScale
out = F.linear(input_f, weight_q, self.bias)
return out
class MultiHeadCWN(torch.nn.Linear):
def __init__(self, in_features, out_features, bias=True, num_heads=8,
NScale=1, adjustScale=False, *args, **kwargs):
super(MultiHeadCWN, self).__init__(in_features, out_features, bias)
print('MultiHeadCWN:---NScale:', NScale, '---adjust:', adjustScale)
self.in_features = in_features
self.out_features = out_features
self.num_heads = num_heads
self.weight_normalization = CWNorm()
self.scale_ = torch.ones(out_features, 1).fill_(NScale)
if adjustScale:
self.WNScale = Parameter(self.scale_)
else:
# self.scale = Variable(self.scale, requires_grad=False)
self.register_buffer('WNScale', self.scale_)
def forward(self, input_f: torch.Tensor) -> torch.Tensor:
#self.weight = self.weight.reshape(self.in_features*self.num_heads, self.out_features//self.num_heads)
#self.weight = self.weight.view(self.in_features*self.num_heads, self.out_features//self.num_heads)
weight_q = self.weight_normalization(self.weight)
#weight_q = weight_q.reshape(self.in_features, self.out_features)
weight_q = weight_q * self.WNScale
out = F.linear(input_f, weight_q, self.bias)
return out
if __name__ == '__main__':
cwn_ = CWNorm()
print(cwn_)
w_ = torch.randn(4, 3, 2)
w_.requires_grad_()
y_ = cwn_(w_)
z_ = y_.view(w_.size(0), -1)
print(z_.norm(dim=1))
y_.sum().backward()
print('w grad', w_.grad.size())
| 3,164 | 38.5625 | 110 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/__init__.py
| 0 | 0 | 0 |
py
|
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/quantization_options.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def parse_config_yaml(yaml_data):
# Initialize to default options.
quantization_options = {
"n_centroids": {
"Linear": ["in_features", {"*": 256}],
"Embedding": ["embedding_dim", {"*": 256}],
},
"block_sizes": {
"Linear": ["fuzzy_name", {"fc": 8, "attn": 4, "emb": 4}],
"Embedding": ["fuzzy_name", {"emb": 8}],
},
"layers_to_quantize": [
"decoder\\.layers\\.\\d+\\.fc[12]",
"decoder\\.embed_tokens\\.embeddings\\.[012]\\.[01]",
"decoder\\.layers\\.\\d+\\.self_attn\\.(k_proj|v_proj|q_proj|out_proj)",
],
}
if "n_centroids" in yaml_data:
quantization_options["n_centroids"] = {
layer: convert_yaml_to_tuple(layer_data)
for layer, layer_data in yaml_data["n_centroids"].items()
}
if "block_sizes" in yaml_data:
quantization_options["block_sizes"] = {
layer: convert_yaml_to_tuple(layer_data)
for layer, layer_data in yaml_data["block_sizes"].items()
}
if "layers_to_quantize" in yaml_data:
quantization_options["layers_to_quantize"] = yaml_data["layers_to_quantize"]
return quantization_options
def convert_yaml_to_tuple(yaml_dictionary):
"""Converts a yaml dictionary with two keys: `key` and `value` into a two
argument tuple of those values."""
return (yaml_dictionary["key"], yaml_dictionary["value"])
| 1,647 | 35.622222 | 84 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/pq/em.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import logging
from collections import Counter
import torch
class EM:
"""
EM algorithm used to quantize the columns of W to minimize
||W - W_hat||^2
Args:
- W: weight matrix of size (in_features x out_features)
- n_iter: number of k-means iterations
- n_centroids: number of centroids (size of codebook)
- eps: for cluster reassignment when an empty cluster is found
        - max_tentatives: for cluster reassignment when an empty cluster is found
- verbose: print error after each iteration
Remarks:
- If one cluster is empty, the most populated cluster is split into
two clusters
- All the relevant dimensions are specified in the code
"""
def __init__(
self, W, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True
):
self.W = W
self.n_centroids = n_centroids
self.n_iter = n_iter
self.eps = eps
self.max_tentatives = max_tentatives
self.verbose = verbose
self.centroids = torch.Tensor()
self.assignments = torch.Tensor()
self.objective = []
def initialize_centroids(self):
"""
Initializes the centroids by sampling random columns from W.
"""
in_features, out_features = self.W.size()
indices = torch.randint(
low=0, high=out_features, size=(self.n_centroids,)
).long()
self.centroids = self.W[:, indices].t() # (n_centroids x in_features)
def step(self, i):
"""
There are two standard steps for each iteration: expectation (E) and
minimization (M). The E-step (assignment) is performed with an exhaustive
search and the M-step (centroid computation) is performed with
the exact solution.
Args:
- i: step number
Remarks:
- The E-step heavily uses PyTorch broadcasting to speed up computations
and reduce the memory overhead
"""
# assignments (E-step)
distances = self.compute_distances() # (n_centroids x out_features)
self.assignments = torch.argmin(distances, dim=0) # (out_features)
n_empty_clusters = self.resolve_empty_clusters()
# centroids (M-step)
for k in range(self.n_centroids):
W_k = self.W[:, self.assignments == k] # (in_features x size_of_cluster_k)
self.centroids[k] = W_k.mean(dim=1) # (in_features)
# book-keeping
obj = (self.centroids[self.assignments].t() - self.W).norm(p=2).item()
self.objective.append(obj)
if self.verbose:
logging.info(
f"Iteration: {i},\t"
f"objective: {obj:.6f},\t"
f"resolved empty clusters: {n_empty_clusters}"
)
def resolve_empty_clusters(self):
"""
If one cluster is empty, the most populated cluster is split into
two clusters by shifting the respective centroids. This is done
iteratively for a fixed number of tentatives.
"""
# empty clusters
counts = Counter(map(lambda x: x.item(), self.assignments))
empty_clusters = set(range(self.n_centroids)) - set(counts.keys())
n_empty_clusters = len(empty_clusters)
tentatives = 0
while len(empty_clusters) > 0:
# given an empty cluster, find most populated cluster and split it into two
k = random.choice(list(empty_clusters))
m = counts.most_common(1)[0][0]
e = torch.randn_like(self.centroids[m]) * self.eps
self.centroids[k] = self.centroids[m].clone()
self.centroids[k] += e
self.centroids[m] -= e
# recompute assignments
distances = self.compute_distances() # (n_centroids x out_features)
self.assignments = torch.argmin(distances, dim=0) # (out_features)
# check for empty clusters
counts = Counter(map(lambda x: x.item(), self.assignments))
empty_clusters = set(range(self.n_centroids)) - set(counts.keys())
# increment tentatives
if tentatives == self.max_tentatives:
logging.info(
f"Could not resolve all empty clusters, {len(empty_clusters)} remaining"
)
raise EmptyClusterResolveError
tentatives += 1
return n_empty_clusters
def compute_distances(self):
"""
For every centroid m, computes
||M - m[None, :]||_2
Remarks:
- We rely on PyTorch's broadcasting to speed up computations
and reduce the memory overhead
- Without chunking, the sizes in the broadcasting are modified as:
              (n_centroids x in_features x out_features) -> (n_centroids x out_features)
- The broadcasting computation is automatically chunked so that
the tensors fit into the memory of the GPU
"""
nb_centroids_chunks = 1
while True:
try:
return torch.cat(
[
(self.W[None, :, :] - centroids_c[:, :, None]).norm(p=2, dim=1)
for centroids_c in self.centroids.chunk(
nb_centroids_chunks, dim=0
)
],
dim=0,
)
except RuntimeError:
nb_centroids_chunks *= 2
def assign(self):
"""
        Assigns each column of W to its closest centroid, thus essentially
        performing the E-step of the EM loop.
        Remarks:
            - The function must be called after running the EM steps (see
              step()) or after loading centroids using self.load(), otherwise
              it will return empty tensors
"""
distances = self.compute_distances() # (n_centroids x out_features)
self.assignments = torch.argmin(distances, dim=0) # (out_features)
def save(self, path, layer):
"""
Saves centroids and assignments.
Args:
- path: folder used to save centroids and assignments
"""
torch.save(self.centroids, os.path.join(path, "{}_centroids.pth".format(layer)))
torch.save(
self.assignments, os.path.join(path, "{}_assignments.pth".format(layer))
)
torch.save(self.objective, os.path.join(path, "{}_objective.pth".format(layer)))
def load(self, path, layer):
"""
Loads centroids and assignments from a given path
Args:
            - path: folder used to load centroids and assignments
"""
self.centroids = torch.load(
os.path.join(path, "{}_centroids.pth".format(layer))
)
self.assignments = torch.load(
os.path.join(path, "{}_assignments.pth".format(layer))
)
self.objective = torch.load(
os.path.join(path, "{}_objective.pth".format(layer))
)
class EmptyClusterResolveError(Exception):
pass
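# Usage sketch (not part of the original file): run a few EM iterations on a random
# weight matrix; sizes are illustrative and everything stays on CPU.
if __name__ == "__main__":
    torch.manual_seed(0)
    W = torch.randn(16, 128)                   # (in_features x out_features)
    em = EM(W, n_centroids=8, n_iter=5, verbose=False)
    em.initialize_centroids()
    for i in range(em.n_iter):
        try:
            em.step(i)
        except EmptyClusterResolveError:
            break
    W_hat = em.centroids[em.assignments].t()   # reconstruction of W from the codebook
    print("reconstruction error:", (W_hat - W).norm(p=2).item())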
| 7,333 | 33.59434 | 92 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/pq/pq.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .em import EM, EmptyClusterResolveError
class PQ(EM):
"""
Quantizes the layer weights W with the standard Product Quantization
technique. This learns a codebook of codewords or centroids of size
block_size from W. For further reference on using PQ to quantize
neural networks, see "And the Bit Goes Down: Revisiting the Quantization
of Neural Networks", Stock et al., ICLR 2020.
PQ is performed in two steps:
        (1) The matrix W (weights of a fully-connected or convolutional layer)
is reshaped to (block_size, -1).
- If W is fully-connected (2D), its columns are split into
blocks of size block_size.
- If W is convolutional (4D), its filters are split along the
spatial dimension.
(2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix.
Args:
- W: weight matrix to quantize of size (in_features x out_features)
- block_size: size of the blocks (subvectors)
- n_centroids: number of centroids
- n_iter: number of k-means iterations
- eps: for cluster reassignment when an empty cluster is found
        - max_tentatives: for cluster reassignment when an empty cluster is found
- verbose: print information after each iteration
Remarks:
        - block_size must be compatible with the shape of W
"""
def __init__(
self,
W,
block_size,
n_centroids=256,
n_iter=20,
eps=1e-6,
max_tentatives=30,
verbose=True,
):
self.block_size = block_size
W_reshaped = self._reshape(W)
super(PQ, self).__init__(
W_reshaped,
n_centroids=n_centroids,
n_iter=n_iter,
eps=eps,
max_tentatives=max_tentatives,
verbose=verbose,
)
def _reshape(self, W):
"""
        Reshapes the matrix W as explained in step (1).
"""
# fully connected: by convention the weight has size out_features x in_features
if len(W.size()) == 2:
self.out_features, self.in_features = W.size()
assert (
self.in_features % self.block_size == 0
), "Linear: n_blocks must be a multiple of in_features"
return (
W.reshape(self.out_features, -1, self.block_size)
.permute(2, 1, 0)
.flatten(1, 2)
)
# convolutional: we reshape along the spatial dimension
elif len(W.size()) == 4:
self.out_channels, self.in_channels, self.k_h, self.k_w = W.size()
assert (
self.in_channels * self.k_h * self.k_w
) % self.block_size == 0, (
"Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w"
)
return (
W.reshape(self.out_channels, -1, self.block_size)
.permute(2, 1, 0)
.flatten(1, 2)
)
# not implemented
else:
raise NotImplementedError(W.size())
def encode(self):
"""
Performs self.n_iter EM steps.
"""
self.initialize_centroids()
for i in range(self.n_iter):
try:
self.step(i)
except EmptyClusterResolveError:
break
def decode(self):
"""
        Returns the decoded (re-instantiated) full weight matrix. Must be called after
the encode function.
"""
# fully connected case
if "k_h" not in self.__dict__:
return (
self.centroids[self.assignments]
.reshape(-1, self.out_features, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
# convolutional case
else:
return (
self.centroids[self.assignments]
.reshape(-1, self.out_channels, self.block_size)
.permute(1, 0, 2)
.reshape(self.out_channels, self.in_channels, self.k_h, self.k_w)
)
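# Usage sketch (not part of the original file): quantize and reconstruct a small
# fully-connected weight; shapes and hyper-parameters are illustrative.
if __name__ == "__main__":
    import torch
    torch.manual_seed(0)
    W = torch.randn(32, 16)                    # (out_features x in_features), as in nn.Linear
    pq = PQ(W, block_size=4, n_centroids=16, n_iter=5, verbose=False)
    pq.encode()                                # learn centroids and assignments
    W_hat = pq.decode()                        # re-instantiated weight, same shape as W
    print(W_hat.size(), (W_hat - W).norm(p=2).item())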
| 4,292 | 32.27907 | 87 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/pq/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
from operator import attrgetter, itemgetter
import numpy as np
import torch.nn as nn
import torch.distributed as dist
from .modules import PQConv2d, PQLinear, PQEmbedding
from .pq import PQ
def quantize_model_(
model,
size_tracker,
layers_to_quantize,
block_sizes_config,
n_centroids_config,
step=0,
n_iter=15,
eps=1e-6,
max_tentatives=100,
verbose=True,
):
"""
Quantize a model in-place by stages. All the targeted
layers are replaced by their quantized counterpart,
and the model is ready for the finetuning of the
centroids in a standard training loop (no modifications
required). Note that we do not quantize biases.
Args:
- model: a nn.Module
        - size_tracker: useful for tracking quantization statistics
- layers_to_quantize: a list containing regexps for
filtering the layers to quantize at each stage according
to their name (as in model.named_parameters())
- block_sizes_config: dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
- n_centroids_config: dict like
{
'Conv2d': ('kernel_size', {'*': 256}),
'Linear': ('in_features', {'*': 256})
}
For instance, all conv2d layers are quantized with 256 centroids
- step: the layers to quantize inplace corresponding
to layers_to_quantize[step]
"""
quantized_layers = get_layers(model, layers_to_quantize[step])
for layer in quantized_layers:
# book-keeping
is_master_process = (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0)
verbose = verbose and is_master_process
# get block size and centroids
module = attrgetter(layer)(model)
block_size = get_param(module, layer, block_sizes_config)
n_centroids = get_param(module, layer, n_centroids_config)
if verbose:
logging.info(f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids")
# quantize layer
weight = module.weight.data.clone()
is_bias = 'bias' in [x[0] for x in module.named_parameters()]
bias = module.bias.data.clone() if is_bias else None
quantizer = PQ(
weight,
block_size,
n_centroids=n_centroids,
n_iter=n_iter,
eps=eps,
max_tentatives=max_tentatives,
verbose=verbose,
)
# quantization performed on all GPUs with same seed
quantizer.encode()
centroids = quantizer.centroids.contiguous()
assignments = quantizer.assignments.contiguous()
# broadcast results to make sure weights are up-to-date
if dist.is_initialized():
dist.broadcast(centroids, 0)
dist.broadcast(assignments, 0)
# instantiate the quantized counterpart
if isinstance(module, nn.Linear):
out_features, in_features = map(
lambda k: module.__dict__[k], ["out_features", "in_features"]
)
quantized_module = PQLinear(
centroids, assignments, bias, in_features, out_features
)
elif isinstance(module, nn.Embedding):
num_embeddings, embedding_dim = map(
lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"]
)
quantized_module = PQEmbedding(
centroids, assignments, num_embeddings, embedding_dim
)
elif isinstance(module, nn.Conv2d):
out_channels, in_channels, kernel_size = map(
lambda k: module.__dict__[k],
["out_channels", "in_channels", "kernel_size"],
)
stride, padding, dilation, groups, padding_mode = map(
lambda k: module.__dict__[k],
["stride", "padding", "dilation", "groups", "padding_mode"],
)
quantized_module = PQConv2d(
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
padding_mode=padding_mode,
)
else:
raise ValueError(f"Module {module} not yet supported for quantization")
# replace layer by its quantized counterpart
attrsetter(layer)(model, quantized_module)
# update statistics
size_tracker.update(weight, block_size, n_centroids)
# return name of quantized layers
return quantized_layers
def get_layers(model, filter_regexp):
"""
    Selects the layers matching a regexp. Note that
we omit biases.
Args:
- model: a nn.Module
- filter_regexp: a regexp to filter the layers to keep
according to their name in model.named_parameters().
For instance, the regexp:
down_layers\\.[123456]\\.(conv[12]|identity\\.conv))
is keeping blocks down_layers from 1 to 6, and inside
each block is keeping conv1, conv2 and identity.conv.
Remarks:
- We add (module\\.)? at the beginning of the regexp to
account for the possible use of nn.parallel.DataParallel
"""
# get all parameter names
all_layers = map(itemgetter(0), model.named_parameters())
# remove biases
all_layers = filter(lambda x: "bias" not in x, all_layers)
    # remove .weight in all other names (or .weight_orig for spectral norm)
all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
# return filtered layers
filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
r = re.compile(filter_regexp)
return list(filter(r.match, all_layers))
def get_param(module, layer_name, param_config):
"""
Given a quantization configuration, get the right parameter
for the module to be quantized.
Args:
- module: a nn.Module
- layer_name: the name of the layer
- param_config: a dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
Remarks:
- if 'fuzzy_name' is passed as a parameter, layers whose layer_name
include 'fuzzy_name' will be assigned the given parameter.
In the following example, conv.expand layers will have a block
size of 9 while conv.reduce will have a block size of 4 and all
other layers will have a block size of 2.
{
'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}),
'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4})
}
"""
layer_type = module.__class__.__name__
if layer_type not in param_config:
raise KeyError(f"Layer type {layer_type} not in config for layer {module}")
feature, params = param_config[module.__class__.__name__]
if feature != "fuzzy_name":
feature_value = str(getattr(module, feature))
if feature_value not in params:
if "*" in params:
feature_value = "*"
else:
raise KeyError(
f"{feature}={feature_value} not in config for layer {module}"
)
else:
feature_values = [name for name in params if name in layer_name]
if len(feature_values) == 0:
if "*" in params:
feature_value = "*"
else:
raise KeyError(
f"name={layer_name} not in config for {module}"
)
else:
feature_value = feature_values[0]
return params[feature_value]
class SizeTracker(object):
"""
Class to keep track of the compressed network size with iPQ.
Args:
- model: a nn.Module
Remarks:
- The compressed size is the sum of three components
for each layer in the network:
(1) Storing the centroids given by iPQ in fp16
(2) Storing the assignments of the blocks in int8
(3) Storing all non-compressed elements such as biases
        - This cost is only valid if we use 256 centroids (then
          indexing can indeed be done with int8).
"""
def __init__(self, model):
self.model = model
self.size_non_compressed_model = self.compute_size()
self.size_non_quantized = self.size_non_compressed_model
self.size_index = 0
self.size_centroids = 0
self.n_quantized_layers = 0
def compute_size(self):
"""
Computes the size of the model (in MB).
"""
res = 0
for _, p in self.model.named_parameters():
res += p.numel()
return res * 4 / 1024 / 1024
def update(self, W, block_size, n_centroids):
"""
Updates the running statistics when quantizing a new layer.
"""
# bits per weights
bits_per_weight = np.log2(n_centroids) / block_size
self.n_quantized_layers += 1
# size of indexing the subvectors of size block_size (in MB)
size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024
self.size_index += size_index_layer
# size of the centroids stored in float16 (in MB)
size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024
self.size_centroids += size_centroids_layer
# size of non-compressed layers, e.g. LayerNorms or biases (in MB)
size_uncompressed_layer = W.numel() * 4 / 1024 / 1024
self.size_non_quantized -= size_uncompressed_layer
def __repr__(self):
size_compressed = (
self.size_index + self.size_centroids + self.size_non_quantized
)
compression_ratio = self.size_non_compressed_model / size_compressed # NOQA
return (
f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. "
f"After quantizing {self.n_quantized_layers} layers, size "
f"(indexing + centroids + other): {self.size_index:.2f} MB + "
f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = "
f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x"
)
def attrsetter(*items):
def resolve_attr(obj, attr):
attrs = attr.split(".")
head = attrs[:-1]
tail = attrs[-1]
for name in head:
obj = getattr(obj, name)
return obj, tail
def g(obj, val):
for attr in items:
resolved_obj, resolved_attr = resolve_attr(obj, attr)
setattr(resolved_obj, resolved_attr, val)
return g
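# Usage sketch (not part of the original file): quantize the Linear layers of a toy
# model in a single stage, on CPU and in a single process. The layer regexp, block
# sizes and centroid counts below are illustrative only.
if __name__ == "__main__":
    import torch
    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 8))
    size_tracker = SizeTracker(model)
    layers_to_quantize = ["[02]"]              # one stage matching both Linear layers
    block_sizes_config = {"Linear": ("in_features", {"*": 4})}
    n_centroids_config = {"Linear": ("in_features", {"*": 8})}
    quantized = quantize_model_(
        model, size_tracker, layers_to_quantize, block_sizes_config,
        n_centroids_config, step=0, n_iter=5, verbose=False,
    )
    print(quantized)                           # ['0', '2']
    print(size_tracker)                        # compression statistics
    out = model(torch.randn(2, 16))            # forward pass through the PQLinear modules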
| 11,605 | 33.541667 | 110 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/pq/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .utils import SizeTracker, quantize_model_ # NOQA
| 234 | 32.571429 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/pq/modules/qlinear.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class PQLinear(nn.Module):
"""
Quantized counterpart of nn.Linear module. Stores the centroid, the assignments
and the non-quantized biases. The full weight is re-instantiated at each forward
pass.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_features x n_blocks
- bias: the non-quantized bias
Remarks:
- We refer the reader to the official documentation of the nn.Linear module
for the other arguments and the behavior of the module
- Performance tests on GPU show that this implementation is 15% slower than
the non-quantized nn.Linear module for a standard training loop.
"""
def __init__(self, centroids, assignments, bias, in_features, out_features):
super(PQLinear, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.in_features = in_features
self.out_features = out_features
# check compatibility
if self.in_features % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % self.out_features != 0:
raise ValueError("Wrong PQ sizes")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.register_parameter("bias", None)
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.out_features, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
def forward(self, x):
return F.linear(
x,
self.weight,
self.bias,
)
def extra_repr(self):
return f"in_features={self.in_features},\
out_features={self.out_features},\
n_centroids={self.n_centroids},\
block_size={self.block_size},\
bias={self.bias is not None}"
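# Standalone sketch (not part of the original file): the centroids and assignments
# below are random and only illustrate the expected shapes; in practice they are
# produced by the PQ/EM code in ../pq.py.
if __name__ == "__main__":
    in_features, out_features, block_size, n_centroids = 16, 8, 4, 32
    n_blocks = (in_features // block_size) * out_features   # one assignment per subvector
    centroids = torch.randn(n_centroids, block_size)
    assignments = torch.randint(0, n_centroids, (n_blocks,))
    layer = PQLinear(centroids, assignments, torch.zeros(out_features), in_features, out_features)
    x = torch.randn(2, in_features)
    print(layer(x).size())                     # torch.Size([2, 8])
    print(layer)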
| 2,547 | 34.388889 | 86 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/pq/modules/qconv.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
class PQConv2d(nn.Module):
"""
Quantized counterpart of nn.Conv2d module. Stores the centroid, the assignments
and the non-quantized biases. The full weight is re-instantiated at each forward
pass and autograd automatically computes the gradients with respect to the
centroids.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_channels x n_blocks
- bias: the non-quantized bias, must be either torch.Tensor or None
Remarks:
- We refer the reader to the official documentation of the nn.Conv2d module
for the other arguments and the behavior of the module.
- Performance tests on GPU show that this implementation is 10% slower than
the non-quantized nn.Conv2d module for a standard training loop.
- During the backward, the gradients are averaged by cluster and not summed.
This explains the hook registered to the centroids.
"""
def __init__(
self,
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode="zeros",
):
super(PQConv2d, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.padding_mode = padding_mode
# check compatibility
if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % out_channels != 0:
raise ValueError("Wrong PQ sizes")
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups")
if out_channels % groups != 0:
raise ValueError("out_channels must be divisible by groups")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
if bias is not None:
self.bias = nn.Parameter(bias)
else:
self.register_parameter("bias", None)
# register hook for averaging gradients per centroids instead of summing
self.centroids.register_hook(lambda x: x / self.counts[:, None])
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.out_channels, self.block_size)
.permute(1, 0, 2)
.reshape(
self.out_channels, self.in_channels // self.groups, *self.kernel_size
)
)
def forward(self, x):
return F.conv2d(
x,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def extra_repr(self):
s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
if self.padding != (0,) * len(self.padding):
s += ", padding={padding}"
if self.dilation != (1,) * len(self.dilation):
s += ", dilation={dilation}"
if self.groups != 1:
s += ", groups={groups}"
if self.bias is None:
s += ", bias=False"
if self.padding_mode != "zeros":
s += ", padding_mode={padding_mode}"
s += ", n_centroids={n_centroids}, block_size={block_size}"
return s.format(**self.__dict__)
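# Standalone sketch (not part of the original file): random codebook, illustrative
# shapes only; real centroids/assignments come from the PQ/EM code.
if __name__ == "__main__":
    in_channels, out_channels, k, block_size, n_centroids = 4, 8, 3, 9, 16
    n_blocks = out_channels * (in_channels * k * k // block_size)
    centroids = torch.randn(n_centroids, block_size)
    assignments = torch.randint(0, n_centroids, (n_blocks,))
    conv = PQConv2d(centroids, assignments, None, in_channels, out_channels, k, padding=1)
    x = torch.randn(2, in_channels, 16, 16)
    print(conv(x).size())                      # torch.Size([2, 8, 16, 16])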
| 4,245 | 35.603448 | 87 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/pq/modules/qemb.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class PQEmbedding(nn.Module):
"""
Quantized counterpart of nn.Embedding module. Stores the centroids and
the assignments. The full weight is re-instantiated at each forward
pass.
Args:
- centroids: centroids of size n_centroids x block_size
- assignments: assignments of the centroids to the subvectors
of size self.out_features x n_blocks
- bias: the non-quantized bias
Remarks:
- We refer the reader to the official documentation of the nn.Embedding module
for the other arguments and the behavior of the module
- Performance tests on GPU show that this implementation is 10% slower than
the non-quantized nn.Embedding module for a standard training loop.
"""
def __init__(self, centroids, assignments, num_embeddings, embedding_dim,
padding_idx=None, max_norm=None, norm_type=2.,
scale_grad_by_freq=False, sparse=False, _weight=None):
super(PQEmbedding, self).__init__()
self.block_size = centroids.size(1)
self.n_centroids = centroids.size(0)
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
# check compatibility
if self.embedding_dim % self.block_size != 0:
raise ValueError("Wrong PQ sizes")
if len(assignments) % self.num_embeddings != 0:
raise ValueError("Wrong PQ sizes")
# define parameters
self.centroids = nn.Parameter(centroids, requires_grad=True)
self.register_buffer("assignments", assignments)
self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
@property
def weight(self):
return (
self.centroids[self.assignments]
.reshape(-1, self.num_embeddings, self.block_size)
.permute(1, 0, 2)
.flatten(1, 2)
)
def forward(self, input):
return F.embedding(
input, self.weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.padding_idx is not None:
s += ', padding_idx={padding_idx}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
if self.sparse is not False:
s += ', sparse=True'
s += ', n_centroids={n_centroids}, block_size={block_size}'
return s.format(**self.__dict__)
| 3,515 | 38.954545 | 103 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/pq/modules/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qconv import PQConv2d # NOQA
from .qlinear import PQLinear # NOQA
from .qemb import PQEmbedding # NOQA
| 290 | 31.333333 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/scalar/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from operator import attrgetter
import torch.nn as nn
import torch.distributed as dist
from ..pq.utils import get_layers, attrsetter
from .modules import IntConv2d, IntLinear, IntEmbedding, ActivationQuantizer
MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d}
def quantize_model_(model, p=0.2, bits=8, update_step=3000):
"""
Replaces all modules with their scalar quantized counterpart and
    registers hooks to quantize the post-activations of those modules.
Args:
- model: a nn.Module
- p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
- bits: number of bits
- update_step: update quantization parameters every update_step steps
"""
# quantize all layers
quantized_layers = get_layers(model, "(.*?)")
for layer in quantized_layers:
# book-keeping
is_master_process = (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0)
# recover module
module = attrgetter(layer)(model)
if is_master_process:
logging.info(f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}")
# quantization params
q_params = {"p": p, "update_step": update_step, "bits": bits, "method": "histogram", "counter": 0}
# instantiate the quantized counterpart
if isinstance(module, tuple(MAPPING.keys())):
QuantizedModule = MAPPING[module.__class__]
quantized_module = QuantizedModule.__new__(QuantizedModule)
params = module.__dict__
params.update(q_params)
quantized_module.__dict__.update(params)
else:
if is_master_process:
logging.info(f"Module {module} not yet supported for quantization")
continue
# activation quantization
a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method="histogram")
# replace layer by its quantized counterpart
attrsetter(layer)(model, quantized_module)
# return name of quantized layers
return quantized_layers
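# Usage sketch (not part of the original file): scalar-quantize a toy model with
# QuantNoise. Assumes a CUDA device is available, since emulate_int (see ops.py in
# this package) moves scale/zero_point to the GPU; all sizes below are illustrative.
if __name__ == "__main__":
    import torch
    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 8)).cuda()
    quantized_layers = quantize_model_(model, p=0.1, bits=8, update_step=10)
    print(quantized_layers)                    # ['0', '2']
    model.train()
    out = model(torch.randn(4, 16).cuda())     # QuantNoise on a fraction p of the weights
    model.eval()
    out = model(torch.randn(4, 16).cuda())     # fully quantized weights at evaluation time
    print(out.size())                          # torch.Size([4, 8])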
| 2,323 | 33.176471 | 107 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/scalar/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .utils import quantize_model_ # NOQA
| 221 | 30.714286 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/scalar/ops.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def emulate_int(w, bits, method, scale=None, zero_point=None):
q = globals()[f"emulate_int{bits}_{method}"]
return q(w, scale=scale, zero_point=zero_point)
def quantize(w, scale, zero_point):
return (torch.clamp(torch.round(w / scale + zero_point), 0, 255) - zero_point) * scale
def emulate_int8_histogram(w, scale=None, zero_point=None):
if scale is None:
obs = torch.quantization.observer.HistogramObserver()
_ = obs(w.float())
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point), scale, zero_point
def emulate_int8_channel(w, scale=None, zero_point=None):
if scale is None:
obs = torch.quantization.observer.PerChannelMinMaxObserver(
ch_axis=-1, qscheme=torch.per_channel_symmetric
)
_ = obs(w)
scale, zero_point, ch_axis = obs.get_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point), scale, zero_point
def emulate_int8_tensor(w, scale=None, zero_point=None):
if scale is None:
obs = torch.quantization.observer.MinMaxObserver()
_ = obs(w)
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
zero_point = zero_point.cuda().type_as(w)
return quantize(w, scale, zero_point), scale, zero_point
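# Usage sketch (not part of the original file). Assumes a CUDA device is available,
# because the helpers above move scale/zero_point to the GPU before reusing them.
if __name__ == "__main__":
    w = torch.randn(64, 64).cuda()
    w_q, scale, zero_point = emulate_int(w, bits=8, method="histogram")
    print(float(scale), float(zero_point))
    print("max rounding error:", (w - w_q).abs().max().item())
    # Passing scale/zero_point back in skips re-running the observer:
    w_q2, _, _ = emulate_int(w, bits=8, method="histogram", scale=scale, zero_point=zero_point)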
| 1,669 | 33.791667 | 90 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/qlinear.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..ops import emulate_int
class IntLinear(nn.Module):
"""
Quantized counterpart of the nn.Linear module that applies QuantNoise during training.
Args:
- in_features: input features
- out_features: output features
- bias: bias or not
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick.
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
in_features,
out_features,
bias=True,
p=0,
update_step=3000,
bits=8,
method="histogram",
):
super(IntLinear, self).__init__()
self.in_features = int(in_features)
self.out_features = int(out_features)
self.weight = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.chosen_bias = bias
if self.chosen_bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.chosen_bias:
nn.init.constant_(self.bias, 0.0)
return
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
        # recompute scale and zero_point every update_step iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = - self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach()
# return output
output = F.linear(input, weight, self.bias)
return output
def extra_repr(self):
return "in_features={}, out_features={}, bias={}, quant_noise={}, bits={}, method={}".format(
self.in_features,
self.out_features,
self.bias is not None,
self.p,
self.bits,
self.method,
)
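# Standalone sketch (not part of the original file). Assumes CUDA is available
# (emulate_int moves the quantization parameters to the GPU); sizes are illustrative.
if __name__ == "__main__":
    layer = IntLinear(16, 8, p=0.2, bits=8, method="histogram", update_step=10).cuda()
    x = torch.randn(4, 16).cuda()
    layer.train()
    y = layer(x)                               # QuantNoise: ~20% of the weights see quantization noise
    layer.eval()
    y = layer(x)                               # p=1 at test time: the full weight matrix is quantized
    print(y.size())                            # torch.Size([4, 8])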
| 3,596 | 31.405405 | 101 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/qconv.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from ..ops import emulate_int
class IntConv2d(_ConvNd):
"""
Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training.
Args:
- standard nn.Conv2d parameters
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
        - We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
p=0,
bits=8,
method="histogram",
update_step=1000,
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(IntConv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
False,
_pair(0),
groups,
bias,
padding_mode,
)
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def _conv_forward(self, input, weight):
if self.padding_mode != "zeros":
return F.conv2d(
F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight,
self.bias,
self.stride,
_pair(0),
self.dilation,
self.groups,
)
return F.conv2d(
input,
weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
        # recompute scale and zero_point every update_step iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = - self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach()
# return output
output = self._conv_forward(input, weight)
return output
def extra_repr(self):
return (
"in_channels={}, out_channels={}, kernel_size={}, stride={}, "
"padding={}, dilation={}, groups={}, bias={}, quant_noise={}, "
"bits={}, method={}".format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
self.groups,
self.bias is not None,
self.p,
self.bits,
self.method,
)
)
| 4,415 | 29.040816 | 95 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/qemb.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..ops import emulate_int
class IntEmbedding(nn.Module):
"""
Quantized counterpart of the nn.Embedding module that applies QuantNoise during training.
Args:
- num_embeddings: number of tokens
- embedding_dim: embedding dimension
- p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- bits: number of bits
- method: choose among {"tensor", "histogram", "channel"}
- update_step: recompute scale and zero_point every update_steps iterations
Remarks:
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- At test time, the weights are fully quantized
"""
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
p=0,
update_step=1000,
bits=8,
method="histogram",
):
super(IntEmbedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if _weight is None:
self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim))
self.reset_parameters()
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = nn.Parameter(_weight)
self.sparse = sparse
# quantization parameters
self.p = p
self.bits = bits
self.method = method
self.update_step = update_step
self.counter = 0
def reset_parameters(self):
nn.init.normal_(self.weight)
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input):
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.training else 1
# update parameters every 1000 iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# quantize weight
weight_quantized, self.scale, self.zero_point = emulate_int(
self.weight.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(self.weight)
mask.bernoulli_(1 - p)
noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = - self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
weight = torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) + noise.detach()
# return output
output = F.embedding(
input, weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
return output
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.padding_idx is not None:
s += ', padding_idx={padding_idx}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
if self.sparse is not False:
s += ', sparse=True'
        s += ', quant_noise={p}, bits={bits}, method={method}'
return s.format(**self.__dict__)
| 4,771 | 34.879699 | 103 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qconv import IntConv2d # NOQA
from .qlinear import IntLinear # NOQA
from .qemb import IntEmbedding # NOQA
from .qact import ActivationQuantizer # NOQA
| 339 | 33 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/quantization/scalar/modules/qact.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ..ops import emulate_int
class ActivationQuantizer:
"""
Fake scalar quantization of the activations using a forward hook.
Args:
        - module: a nn.Module for which we quantize the *post-activations*
- p: proportion of activations to quantize, set by default to 1
- update_step: to recompute quantization parameters
- bits: number of bits for quantization
- method: choose among {"tensor", "histogram", "channel"}
- clamp_threshold: to prevent gradients overflow
Remarks:
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- For the list of quantization methods and number of bits, see ops.py
- To remove the hook from the module, simply call self.handle.remove()
- At test time, the activations are fully quantized
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- The activations are hard-clamped in [-clamp_threshold, clamp_threshold]
to prevent overflow during the backward pass
"""
def __init__(self, module, p=1, update_step=1000, bits=8,
method="histogram", clamp_threshold=5):
self.module = module
self.p = p
self.update_step = update_step
self.counter = 0
self.bits = bits
self.method = method
self.clamp_threshold = clamp_threshold
self.handle = None
self.register_hook()
def register_hook(self):
# forward hook
def quantize_hook(module, x, y):
# update parameters every 1000 iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.module.training else 1
# quantize activations
y_q, self.scale, self.zero_point = emulate_int(
y.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(y)
mask.bernoulli_(1 - p)
noise = (y_q - y).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = - self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
return torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach()
# register hook
self.handle = self.module.register_forward_hook(quantize_hook)
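# Usage sketch (not part of the original file): attach the quantizer to a plain
# nn.Linear. Assumes CUDA is available (emulate_int moves scale/zero_point to the GPU).
if __name__ == "__main__":
    import torch.nn as nn
    layer = nn.Linear(16, 8).cuda()
    quantizer = ActivationQuantizer(layer, p=0.5, bits=8, method="histogram")
    y = layer(torch.randn(4, 16).cuda())       # output passes through the quantization hook
    print(y.size())                            # torch.Size([4, 8])
    quantizer.handle.remove()                  # detach the hook when it is no longer needed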
| 3,033 | 36.45679 | 87 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/lightconv_layer/cuda_function_gen.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
std::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = filters.size(0);
const auto filterSize = filters.size(1);
const auto numFiltersInBlock = numFeatures / numHeads;
const dim3 blocks(minibatch, numFeatures);
auto output = at::zeros_like(input);
auto stream = at::cuda::getCurrentCUDAStream();
"""
sequence_if = """
if (sequenceLength <= {seq}) {{
switch(filterSize) {{
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {pad}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{
lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
filters.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
output.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
}
break;
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
}
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
final_return = """
}
return {output};
}
"""
with open("lightconv_cuda_forward.cu", 'w') as forward:
forward.write(head)
for seq in seqs:
forward.write(sequence_if.format(seq=seq))
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(con_else)
forward.write(final_else)
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(final_return)
def gen_backward():
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
std::vector<at::Tensor> lightconv_cuda_backward(
at::Tensor gradOutput,
int padding_l,
at::Tensor input,
at::Tensor filters) {
// gradWrtInput
const int minibatch = input.size(0);
const int numFeatures = input.size(1);
const int sequenceLength = input.size(2);
const int numHeads = filters.size(0);
const int filterSize = filters.size(1);
const dim3 gradBlocks(minibatch, numFeatures);
const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads);
const dim3 weightGradSecondpassBlocks(numHeads, filterSize);
const int numFiltersInBlock = numFeatures / numHeads;
auto gradInput = at::zeros_like(input);
auto gradFilters = at::zeros_like(filters);
at::DeviceGuard g(input.device());
auto stream = at::cuda::getCurrentCUDAStream();
switch(filterSize) {
"""
sequence_if = """
if (sequenceLength <= {seq}) {{
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {p}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{
lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t>
<<<gradBlocks, {b_size}, 0, stream>>>(
gradOutput.data<scalar_t>(),
filters.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
gradInput.data<scalar_t>());
"""
weight_grad_short = """
at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat));
lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t>
<<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
gradOutput.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
tempSumGradFilters.data<float>()
);
lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t>
<<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
tempSumGradFilters.data<float>(),
minibatch,
numFiltersInBlock,
gradFilters.data<scalar_t>()
);
}}));
}} else
"""
weight_grad = """
at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat));
lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t>
<<<gradBlocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
gradOutput.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
tempSumGradFilters.data<float>()
);
lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t>
<<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
tempSumGradFilters.data<float>(),
minibatch,
numFiltersInBlock,
gradFilters.data<scalar_t>()
);
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
"""
breakout = """
break;
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
last_return = """
}
return {gradInput, gradFilters};
}
"""
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
thresh = [32, 32, 64, 128, 256, -1, -1, -1]
max_mem = [-1, -1, -1, -1, -1, 192, 96, 64]
with open("lightconv_cuda_backward.cu", 'w') as backward:
backward.write(head)
for (k, t, mem) in zip(kernels, thresh, max_mem):
backward.write(case_k.format(k=k))
for seq in seqs:
if (t == -1 or seq <= t) and (mem == -1 or seq < mem):
backward.write(sequence_if.format(seq=seq))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=seq, p=p))
backward.write(weight_grad_short.format(k=k, b_size=seq, p=p))
backward.write(bad_padding)
else:
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=32, p=p))
backward.write(weight_grad.format(k=k, b_size=32, p=p))
backward.write(bad_padding)
backward.write(breakout)
break
backward.write(con_else)
backward.write(bad_filter)
backward.write(last_return)
if __name__ == "__main__":
gen_forward()
gen_backward()
| 9,642 | 32.251724 | 141 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/lightconv_layer/setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
setup(
name='lightconv_layer',
ext_modules=[
CUDAExtension('lightconv_cuda', [
'lightconv_cuda.cpp',
'lightconv_cuda_kernel.cu',
]),
],
cmdclass={
'build_ext': BuildExtension
})
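# Build sketch (not part of the original file): from this directory, the extension
# can typically be built with either
#   python setup.py install
# or, for an in-place build,
#   python setup.py build_ext --inplace
# A working CUDA toolchain matching the installed PyTorch build is assumed.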
| 545 | 25 | 67 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/lightconv_layer/lightconv_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.autograd import Function
import torch.nn.functional as F
import lightconv_cuda
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
class lightconvFunction(Function):
@staticmethod
def forward(ctx, x, weights, padding_l):
ctx.padding_l = padding_l
outputs = lightconv_cuda.forward(x, weights, padding_l)
variables = [x, weights]
ctx.save_for_backward(*variables)
return outputs[0]
@staticmethod
def backward(ctx, grad_output):
outputs = lightconv_cuda.backward(
grad_output.contiguous(),
ctx.padding_l,
*ctx.saved_tensors)
grad_input, grad_weights = outputs
return grad_input, grad_weights, None
@with_incremental_state
class LightconvLayer(nn.Module):
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
weight_softmax=False,
num_heads=1,
weight_dropout=0.,
bias=False,
):
super(LightconvLayer, self).__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_softmax = weight_softmax
self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__)
self.weight = nn.Parameter(torch.Tensor(num_heads, kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(input_size))
else:
self.bias = None
self.reset_parameters()
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + '.' if name != '' else ''
for k, v in state_dict.items():
if k.endswith(prefix + 'weight'):
if v.dim() == 3 and v.size(1) == 1:
state_dict[k] = v.squeeze(1)
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
if self.bias is not None:
nn.init.constant_(self.bias, 0.)
def forward(self, x, incremental_state=None):
# during inference time, incremental BMM is faster
if incremental_state is not None:
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
x_unfold = x_unfold.view(T*B*H, R, -1)
weight = self.weight
if self.weight_softmax:
weight = F.softmax(weight.float(), dim=1).type_as(weight)
weight = weight[:, -x_unfold.size(2):]
K = weight.size(1)
weight = weight.view(1, H, K).expand(T*B, H, K).contiguous().view(T*B*H, K, 1)
weight = self.weight_dropout_module(weight)
output = torch.bmm(x_unfold, weight) # T*B*H x R x 1
output = output.view(T, B, C)
return output
# during training time, use CUDA kernel
else:
x = x.permute(1, 2, 0).contiguous()
weight = self.weight
if self.weight_softmax:
weight = F.softmax(self.weight, -1)
if self.weight_dropout_module.p:
weight = self.weight_dropout_module(weight)
return lightconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1)
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, 'input_buffer')
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
def half(self):
return self._apply(lambda t: t.half() if t.is_floating_point() else t)
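# Usage sketch (not part of the original file). Assumes the lightconv_cuda extension
# has been built (see setup.py in this directory) and a GPU is available; shapes and
# hyper-parameters are illustrative.
if __name__ == "__main__":
    T, B, C, H, K = 20, 2, 64, 4, 7
    layer = LightconvLayer(C, kernel_size=K, padding_l=K - 1, num_heads=H,
                           weight_softmax=True, weight_dropout=0.1).cuda()
    x = torch.randn(T, B, C).cuda()            # (time, batch, channels)
    y = layer(x)                               # training path: custom CUDA kernel
    print(y.size())                            # torch.Size([20, 2, 64])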
| 4,679 | 35 | 104 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/modules/lightconv_layer/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .lightconv_layer import LightconvLayer # noqa
| 230 | 32 | 65 |
py
|
RegularizedBN
|
RegularizedBN-main/fairseq/logging/progress_bar.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
import atexit
import json
import logging
import os
import sys
from collections import OrderedDict
from contextlib import contextmanager
from numbers import Number
from typing import Optional
import torch
from .meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def progress_bar(
iterator,
log_format: Optional[str] = None,
log_interval: int = 100,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
default_log_format: str = 'tqdm',
):
if log_format is None:
log_format = default_log_format
if log_format == 'tqdm' and not sys.stderr.isatty():
log_format = 'simple'
if log_format == 'json':
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == 'none':
bar = NoopProgressBar(iterator, epoch, prefix)
elif log_format == 'simple':
bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == 'tqdm':
bar = TqdmProgressBar(iterator, epoch, prefix)
else:
raise ValueError('Unknown log format: {}'.format(log_format))
if tensorboard_logdir:
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from .fb_tbmf_wrapper import FbTbmfWrapper
bar = FbTbmfWrapper(bar, log_interval)
except ImportError:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
return bar
def build_progress_bar(
args,
iterator,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default: str = 'tqdm',
no_progress_bar: str = 'none',
):
"""Legacy wrapper that takes an argparse.Namespace."""
if getattr(args, 'no_progress_bar', False):
default = no_progress_bar
if getattr(args, 'distributed_rank', 0) == 0:
tensorboard_logdir = getattr(args, 'tensorboard_logdir', None)
else:
tensorboard_logdir = None
return progress_bar(
iterator,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=tensorboard_logdir,
default_log_format=default,
)
def format_stat(stat):
if isinstance(stat, Number):
stat = '{:g}'.format(stat)
elif isinstance(stat, AverageMeter):
stat = '{:.3f}'.format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = '{:g}'.format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = '{:g}'.format(round(stat.sum))
elif torch.is_tensor(stat):
stat = stat.tolist()
return stat
class BaseProgressBar(object):
"""Abstract class for progress bars."""
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.n = getattr(iterable, 'n', 0)
self.epoch = epoch
self.prefix = ''
if epoch is not None:
self.prefix += 'epoch {:03d}'.format(epoch)
if prefix is not None:
self.prefix += ' | {}'.format(prefix)
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
raise NotImplementedError
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
raise NotImplementedError
def _str_commas(self, stats):
return ', '.join(key + '=' + stats[key].strip()
for key in stats.keys())
def _str_pipes(self, stats):
return ' | '.join(key + ' ' + stats[key].strip()
for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
# Preprocess stats according to datatype
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
@contextmanager
def rename_logger(logger, new_name):
old_name = logger.name
if new_name is not None:
logger.name = new_name
yield logger
logger.name = old_name
class JsonProgressBar(BaseProgressBar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if (
step > 0
and self.log_interval is not None
and step % self.log_interval == 0
):
update = (
self.epoch - 1 + (self.i + 1) / float(self.size)
if self.epoch is not None
else None
)
stats = self._format_stats(stats, epoch=self.epoch, update=update)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self.stats = stats
if tag is not None:
self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix['epoch'] = epoch
if update is not None:
postfix['update'] = round(update, 3)
# Preprocess stats according to datatype
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
class NoopProgressBar(BaseProgressBar):
"""No logging."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
pass
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
pass
class SimpleProgressBar(BaseProgressBar):
"""A minimal logger for non-TTY environments."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if (
step > 0
and self.log_interval is not None
and step % self.log_interval == 0
):
stats = self._format_stats(stats)
postfix = self._str_commas(stats)
with rename_logger(logger, tag):
logger.info(
'{}: {:5d} / {:d} {}'
.format(self.prefix, self.i + 1, self.size, postfix)
)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info('{} | {}'.format(self.prefix, postfix))
class TqdmProgressBar(BaseProgressBar):
"""Log to tqdm."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
from tqdm import tqdm
self.tqdm = tqdm(
iterable,
self.prefix,
leave=False,
disable=(logger.getEffectiveLevel() > logging.INFO),
)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info('{} | {}'.format(self.prefix, postfix))
try:
_tensorboard_writers = {}
from tensorboardX import SummaryWriter
except ImportError:
SummaryWriter = None
def _close_writers():
for w in _tensorboard_writers.values():
w.close()
atexit.register(_close_writers)
class TensorboardProgressBarWrapper(BaseProgressBar):
"""Log to tensorboard."""
def __init__(self, wrapped_bar, tensorboard_logdir):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
if SummaryWriter is None:
logger.warning(
"tensorboard not found, please install with: pip install tensorboardX"
)
def _writer(self, key):
if SummaryWriter is None:
return None
_writers = _tensorboard_writers
if key not in _writers:
_writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
_writers[key].add_text('sys.argv', " ".join(sys.argv))
return _writers[key]
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def _log_to_tensorboard(self, stats, tag=None, step=None):
writer = self._writer(tag or '')
if writer is None:
return
if step is None:
step = stats['num_updates']
for key in stats.keys() - {'num_updates'}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
writer.flush()
| 11,082 | 29.786111 | 89 |
py
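A minimal usage sketch of the progress_bar factory above; it assumes fairseq is importable, and the stats values are made up.

from fairseq.logging import progress_bar

batches = range(1000)  # stand-in for an epoch iterator
bar = progress_bar.progress_bar(
    batches,
    log_format='simple',      # 'json', 'tqdm' and 'none' are also accepted
    log_interval=100,
    epoch=1,
    prefix='train',
)
for i, batch in enumerate(bar):
    bar.log({'loss': 2.31, 'num_updates': i}, tag='train_inner', step=i)
bar.print({'loss': 2.10, 'num_updates': len(batches)}, tag='train')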
|
RegularizedBN
|
RegularizedBN-main/fairseq/logging/metrics.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A standalone module for aggregating metrics.
Metrics can be logged from anywhere using the `log_*` functions defined
in this module. The logged values will be aggregated dynamically based
on the aggregation context in which the logging occurs. See the
:func:`aggregate` context manager for more details.
"""
from collections import defaultdict, OrderedDict
import contextlib
import time
from typing import Callable, Dict, List, Optional
import uuid
from .meters import *
# Aggregation contexts are considered "active" when inside the scope
# created by the :func:`aggregate` context manager.
_aggregators = OrderedDict()
_active_aggregators = OrderedDict()
_active_aggregators_cnt = defaultdict(lambda: 0)
def reset() -> None:
"""Reset all metrics aggregators."""
_aggregators.clear()
_active_aggregators.clear()
_active_aggregators_cnt.clear()
# The "default" aggregator observes all logged values.
_aggregators["default"] = MetersDict()
_active_aggregators["default"] = _aggregators["default"]
_active_aggregators_cnt["default"] = 1
reset()
@contextlib.contextmanager
def aggregate(name: Optional[str] = None, new_root: bool = False):
"""Context manager to aggregate metrics under a given name.
Aggregations can be nested. If *new_root* is ``False``, then logged
metrics will be recorded along the entire stack of nested
aggregators, including a global "default" aggregator. If *new_root*
is ``True``, then this aggregator will be the root of a new
aggregation stack, thus bypassing any parent aggregators.
Note that aggregation contexts are uniquely identified by their
*name* (e.g., train, valid). Creating a context with an existing
name will reuse the corresponding :class:`MetersDict` instance.
If no name is given, then a temporary aggregator will be created.
Usage::
with metrics.aggregate("train"):
for step, batch in enumerate(epoch):
with metrics.aggregate("train_inner") as agg:
metrics.log_scalar("loss", get_loss(batch))
if step % log_interval == 0:
print(agg.get_smoothed_value("loss"))
agg.reset()
print(metrics.get_smoothed_values("train")["loss"])
Args:
name (str): name of the aggregation. Defaults to a
random/temporary name if not given explicitly.
new_root (bool): make this aggregation the root of a new
aggregation stack.
"""
if name is None:
# generate a temporary name
name = str(uuid.uuid4())
assert name not in _aggregators
agg = MetersDict()
else:
assert name != "default"
agg = _aggregators.setdefault(name, MetersDict())
if new_root:
backup_aggregators = _active_aggregators.copy()
_active_aggregators.clear()
backup_aggregators_cnt = _active_aggregators_cnt.copy()
_active_aggregators_cnt.clear()
_active_aggregators[name] = agg
_active_aggregators_cnt[name] += 1
yield agg
_active_aggregators_cnt[name] -= 1
if _active_aggregators_cnt[name] == 0 and name in _active_aggregators:
del _active_aggregators[name]
if new_root:
_active_aggregators.clear()
_active_aggregators.update(backup_aggregators)
_active_aggregators_cnt.clear()
_active_aggregators_cnt.update(backup_aggregators_cnt)
def get_active_aggregators() -> List[MetersDict]:
return list(_active_aggregators.values())
def log_scalar(
key: str,
value: float,
weight: float = 1,
priority: int = 10,
round: Optional[int] = None,
):
"""Log a scalar value.
Args:
key (str): name of the field to log
value (float): value to log
weight (float): weight that this value contributes to the average.
A weight of 0 will always log the latest value.
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, AverageMeter(round=round), priority)
agg[key].update(value, weight)
def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20):
"""Log a scalar value derived from other meters.
Args:
key (str): name of the field to log
fn (Callable[[MetersDict], float]): function that takes a single
argument *meters* and returns the derived value
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, MetersDict._DerivedMeter(fn), priority)
def log_speed(
key: str,
value: float,
priority: int = 30,
round: Optional[int] = None,
):
"""Log the rate of some quantity per second.
Args:
key (str): name of the field to log
value (float): value to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, TimeMeter(round=round), priority)
agg[key].reset() # reset meter on the first call
else:
agg[key].update(value)
def log_start_time(key: str, priority: int = 40, round: Optional[int] = None):
"""Log the duration of some event in seconds.
The duration will be computed once :func:`log_stop_time` is called.
Args:
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, StopwatchMeter(round=round), priority)
agg[key].start()
def log_stop_time(key: str, weight: float = 0., prehook=None):
"""Log the duration of some event in seconds.
The duration will be computed since :func:`log_start_time` was called.
Set weight > 0 to report the average time instead of the sum.
Args:
key (str): name of the field to log
weight (float): weight that this time contributes to the average
prehook (function, no arguments): will be called before the timer
is stopped. For example, use prehook=torch.cuda.synchronize to
make sure all gpu operations are done before timer is stopped.
"""
for agg in get_active_aggregators():
if key in agg:
agg[key].stop(weight, prehook)
def log_custom(
new_meter_fn: Callable[[], Meter],
key: str,
*args,
priority: int = 50,
**kwargs,
):
"""Log using a custom Meter.
Any extra *args* or *kwargs* will be passed through to the Meter's
*update* method.
Args:
new_meter_fn (Callable[[], Meter]): function that returns a new
Meter instance
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, new_meter_fn(), priority)
agg[key].update(*args, **kwargs)
def reset_meter(name: str, key: str) -> None:
"""Reset Meter instance aggregated under a given *name* and *key*."""
meter = get_meter(name, key)
if meter is not None:
meter.reset()
def reset_meters(name: str) -> None:
"""Reset Meter instances aggregated under a given *name*."""
meters = get_meters(name)
if meters is not None:
meters.reset()
def get_meter(name: str, key: str) -> Meter:
"""Get a single Meter instance aggregated under *name* and *key*.
Returns:
Meter or None if no metrics have been logged under *name* and *key*.
"""
if name not in _aggregators:
return None
return _aggregators[name].get(key, None)
def get_meters(name: str) -> MetersDict:
"""Get Meter instances aggregated under a given *name*.
Returns:
MetersDict or None if no metrics have been logged under *name*.
"""
return _aggregators.get(name, None)
def get_smoothed_value(name: str, key: str) -> float:
"""Get a single smoothed value.
Raises:
KeyError: if no metrics have been logged under *name* and *key*.
"""
return _aggregators[name].get_smoothed_value(key)
def get_smoothed_values(name: str) -> Dict[str, float]:
"""Get smoothed values aggregated under a given *name*.
Raises:
KeyError: if no metrics have been logged under *name*.
"""
return _aggregators[name].get_smoothed_values()
def state_dict():
return OrderedDict([
(name, agg.state_dict())
for name, agg in _aggregators.items()
])
def load_state_dict(state_dict):
for name, agg_state in state_dict.items():
_aggregators[name] = MetersDict()
_aggregators[name].load_state_dict(agg_state)
| 9,325 | 30.938356 | 81 |
py
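A short, self-contained sketch of the aggregation API above; the loss values are invented and only illustrate how nested contexts share logged scalars.

from fairseq.logging import metrics

with metrics.aggregate("train"):
    for step, loss in enumerate([4.0, 3.0, 2.0]):
        with metrics.aggregate("train_inner") as agg:
            metrics.log_scalar("loss", loss, round=3)

# both aggregators observed every value logged inside the nested scopes
print(metrics.get_smoothed_values("train")["loss"])        # 3.0
print(metrics.get_smoothed_value("train_inner", "loss"))   # 3.0
metrics.reset_meters("train_inner")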
|
RegularizedBN
|
RegularizedBN-main/fairseq/logging/__init__.py
| 0 | 0 | 0 |
py
|
|
RegularizedBN
|
RegularizedBN-main/fairseq/logging/meters.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
from collections import OrderedDict
import time
from typing import Dict, Optional
try:
import torch
def type_as(a, b):
if torch.is_tensor(a) and torch.is_tensor(b):
return a.to(b)
else:
return a
except ImportError:
torch = None
def type_as(a, b):
return a
try:
import numpy as np
except ImportError:
np = None
class Meter(object):
"""Base class for Meters."""
def __init__(self):
pass
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
pass
def reset(self):
raise NotImplementedError
@property
def smoothed_value(self) -> float:
"""Smoothed value used for logging."""
raise NotImplementedError
def safe_round(number, ndigits):
if hasattr(number, '__round__'):
return round(number, ndigits)
elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
return safe_round(number.item(), ndigits)
elif np is not None and np.ndim(number) == 0 and hasattr(number, 'item'):
return safe_round(number.item(), ndigits)
else:
return number
class AverageMeter(Meter):
"""Computes and stores the average and current value"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.reset()
def reset(self):
self.val = None # most recent update
self.sum = 0 # sum from all updates
self.count = 0 # total n from all updates
def update(self, val, n=1):
if val is not None:
self.val = val
if n > 0:
self.sum = type_as(self.sum, val) + (val * n)
self.count = type_as(self.count, n) + n
def state_dict(self):
return {
'val': self.val,
'sum': self.sum,
'count': self.count,
'round': self.round,
}
def load_state_dict(self, state_dict):
self.val = state_dict['val']
self.sum = state_dict['sum']
self.count = state_dict['count']
self.round = state_dict.get('round', None)
@property
def avg(self):
return self.sum / self.count if self.count > 0 else self.val
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class TimeMeter(Meter):
"""Computes the average occurrence of some event per second"""
def __init__(
self,
init: int = 0,
n: int = 0,
round: Optional[int] = None,
):
self.round = round
self.reset(init, n)
def reset(self, init=0, n=0):
self.init = init
self.start = time.perf_counter()
self.n = n
self.i = 0
def update(self, val=1):
self.n = type_as(self.n, val) + val
self.i += 1
def state_dict(self):
return {
'init': self.elapsed_time,
'n': self.n,
'round': self.round,
}
def load_state_dict(self, state_dict):
if 'start' in state_dict:
# backwards compatibility for old state_dicts
self.reset(init=state_dict['init'])
else:
self.reset(init=state_dict['init'], n=state_dict['n'])
self.round = state_dict.get('round', None)
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.perf_counter() - self.start)
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class StopwatchMeter(Meter):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.sum = 0
self.n = 0
self.start_time = None
def start(self):
self.start_time = time.perf_counter()
def stop(self, n=1, prehook=None):
if self.start_time is not None:
if prehook is not None:
prehook()
delta = time.perf_counter() - self.start_time
self.sum = self.sum + delta
self.n = type_as(self.n, n) + n
def reset(self):
self.sum = 0 # cumulative time during which stopwatch was active
self.n = 0 # total n across all start/stop
self.start()
def state_dict(self):
return {
'sum': self.sum,
'n': self.n,
'round': self.round,
}
def load_state_dict(self, state_dict):
self.sum = state_dict['sum']
self.n = state_dict['n']
self.start_time = None
self.round = state_dict.get('round', None)
@property
def avg(self):
return self.sum / self.n if self.n > 0 else self.sum
@property
def elapsed_time(self):
if self.start_time is None:
return 0.
return time.perf_counter() - self.start_time
@property
def smoothed_value(self) -> float:
val = self.avg if self.sum > 0 else self.elapsed_time
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class MetersDict(OrderedDict):
"""A sorted dictionary of :class:`Meters`.
Meters are sorted according to a priority that is given when the
meter is first added to the dictionary.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.priorities = []
def __setitem__(self, key, value):
assert key not in self, "MetersDict doesn't support reassignment"
priority, value = value
bisect.insort(self.priorities, (priority, len(self.priorities), key))
super().__setitem__(key, value)
for _, _, key in self.priorities: # reorder dict to match priorities
self.move_to_end(key)
def add_meter(self, key, meter, priority):
self.__setitem__(key, (priority, meter))
def state_dict(self):
return [
(pri, key, self[key].__class__.__name__, self[key].state_dict())
for pri, _, key in self.priorities
# can't serialize DerivedMeter instances
if not isinstance(self[key], MetersDict._DerivedMeter)
]
def load_state_dict(self, state_dict):
self.clear()
self.priorities.clear()
for pri, key, meter_cls, meter_state in state_dict:
meter = globals()[meter_cls]()
meter.load_state_dict(meter_state)
self.add_meter(key, meter, pri)
def get_smoothed_value(self, key: str) -> float:
"""Get a single smoothed value."""
meter = self[key]
if isinstance(meter, MetersDict._DerivedMeter):
return meter.fn(self)
else:
return meter.smoothed_value
def get_smoothed_values(self) -> Dict[str, float]:
"""Get all smoothed values."""
return OrderedDict([
(key, self.get_smoothed_value(key))
for key in self.keys()
if not key.startswith("_")
])
def reset(self):
"""Reset Meter instances."""
for meter in self.values():
if isinstance(meter, MetersDict._DerivedMeter):
continue
meter.reset()
class _DerivedMeter(Meter):
"""A Meter whose values are derived from other Meters."""
def __init__(self, fn):
self.fn = fn
def reset(self):
pass
| 7,885 | 26.477352 | 79 |
py
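A minimal sketch exercising AverageMeter and MetersDict above, including a derived meter; the values and the "ppl" definition are illustrative only.

from fairseq.logging.meters import AverageMeter, MetersDict

md = MetersDict()
md.add_meter("loss", AverageMeter(round=3), priority=10)
# derived meters are recomputed from the other meters on every read
md.add_meter("ppl", MetersDict._DerivedMeter(lambda m: 2 ** m["loss"].avg), priority=20)

for val in (4.0, 2.0):
    md["loss"].update(val)

print(md.get_smoothed_values())  # OrderedDict([('loss', 3.0), ('ppl', 8.0)])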
|
RegularizedBN
|
RegularizedBN-main/fairseq/criterions/fairseq_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from typing import Any, Dict, List
from torch.nn.modules.loss import _Loss
from fairseq import metrics, utils
class FairseqCriterion(_Loss):
def __init__(self, task):
super().__init__()
self.task = task
if hasattr(task, 'target_dictionary'):
tgt_dict = task.target_dictionary
self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
pass
@classmethod
def build_criterion(cls, args, task):
"""Construct a criterion from command-line args."""
# Criterions can override this, but for convenience we also try
# to automatically map argparse.Namespace keys to corresponding
# arguments in the __init__.
init_args = {}
for p in inspect.signature(cls).parameters.values():
if (
p.kind == p.POSITIONAL_ONLY
or p.kind == p.VAR_POSITIONAL
or p.kind == p.VAR_KEYWORD
):
# we haven't implemented inference for these argument types,
# but PRs welcome :)
raise NotImplementedError('{} not supported'.format(p.kind))
assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY}
if p.name == 'task':
init_args['task'] = task
elif hasattr(args, p.name):
init_args[p.name] = getattr(args, p.name)
elif p.default != p.empty:
pass # we'll use the default value
else:
raise NotImplementedError(
'Unable to infer Criterion arguments, please implement '
'{}.build_criterion'.format(cls.__name__)
)
return cls(**init_args)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
raise NotImplementedError
@staticmethod
def aggregate_logging_outputs(
logging_outputs: List[Dict[str, Any]],
) -> Dict[str, Any]:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
'The aggregate_logging_outputs API is deprecated. '
'Please use the reduce_metrics API instead.'
)
raise NotImplementedError
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
'Criterions should implement the reduce_metrics API. '
'Falling back to deprecated aggregate_logging_outputs API.'
)
agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
for k, v in agg_logging_outputs.items():
if k in {'nsentences', 'ntokens', 'sample_size'}:
continue
metrics.log_scalar(k, v)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.

"""
return False
class LegacyFairseqCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(task=task)
self.args = args
utils.deprecation_warning(
'Criterions should take explicit arguments instead of an '
'argparse.Namespace object, please update your criterion by '
'extending FairseqCriterion instead of LegacyFairseqCriterion.'
)
@classmethod
def build_criterion(cls, args, task):
"""Construct a criterion from command-line args."""
return cls(args, task)
| 4,258 | 34.491667 | 79 |
py
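The introspection in build_criterion above fills a criterion's __init__ parameters by name from the argparse.Namespace ('task' is passed through directly). The hypothetical criterion below only illustrates that convention; its name, flag, and parameters are made up and its forward is deliberately left unimplemented.

from fairseq.criterions import FairseqCriterion, register_criterion

@register_criterion('toy_criterion')  # hypothetical registry name
class ToyCriterion(FairseqCriterion):
    def __init__(self, task, sentence_avg, toy_margin=0.1):
        # 'sentence_avg' and 'toy_margin' are looked up on args by name,
        # falling back to the declared default when the attribute is absent
        super().__init__(task)
        self.sentence_avg = sentence_avg
        self.toy_margin = toy_margin

    @staticmethod
    def add_args(parser):
        parser.add_argument('--toy-margin', default=0.1, type=float)

    def forward(self, model, sample, reduce=True):
        raise NotImplementedError  # loss computation omitted in this sketch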
|
RegularizedBN
|
RegularizedBN-main/fairseq/criterions/nat_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn.functional as F
import torch
from torch import Tensor
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("nat_loss")
class LabelSmoothedDualImitationCriterion(FairseqCriterion):
def __init__(self, task, label_smoothing):
super().__init__(task)
self.label_smoothing = label_smoothing
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument(
'--label-smoothing',
default=0.,
type=float,
metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing',
)
def _compute_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
        policy_logprob: optional log-probability of a policy whose reward
            depends on the likelihood score.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction='none')
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction='none')
losses = losses.sum(-1)
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = nll_loss * (
1 - label_smoothing) - mean_ds(logits) * label_smoothing
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}
def _custom_loss(self, loss, name="loss", factor=1.0):
return {"name": name, "loss": loss, "factor": factor}
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
nsentences, ntokens = sample["nsentences"], sample["ntokens"]
# B x T
src_tokens, src_lengths = (
sample["net_input"]["src_tokens"],
sample["net_input"]["src_lengths"],
)
tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"]
outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)
losses, nll_loss = [], []
for obj in outputs:
if outputs[obj].get("loss", None) is None:
_losses = self._compute_loss(
outputs[obj].get("out"),
outputs[obj].get("tgt"),
outputs[obj].get("mask", None),
outputs[obj].get("ls", 0.0),
name=obj + '-loss',
factor=outputs[obj].get("factor", 1.0)
)
else:
_losses = self._custom_loss(
outputs[obj].get("loss"),
name=obj + '-loss',
factor=outputs[obj].get("factor", 1.0)
)
losses += [_losses]
if outputs[obj].get("nll_loss", False):
nll_loss += [_losses.get("nll_loss", 0.0)]
loss = sum(l["loss"] for l in losses)
nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 \
else loss.new_tensor(0)
# NOTE:
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
for l in losses:
logging_output[l["name"]] = (
utils.item(l["loss"].data / l["factor"])
if reduce
                else l["loss"].data / l["factor"]
)
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
sample_size = utils.item(sum(log.get("sample_size", 0) for log in logging_outputs))
loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs))
metrics.log_scalar('loss', loss / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('nll_loss', nll_loss / sample_size / math.log(2), sample_size, round=3)
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))
for key in logging_outputs[0]:
if key[-5:] == "-loss":
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(
key[:-5],
val / sample_size / math.log(2) if sample_size > 0 else 0.0,
sample_size,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 6,238 | 34.856322 | 98 |
py
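The label-smoothing branch in _compute_loss above combines the NLL with the mean log-probability as loss = (1 - eps) * nll - eps * mean(log_softmax(outputs)). A tiny numeric sketch of that combination on made-up tensors:

import torch
import torch.nn.functional as F

outputs = torch.tensor([[2.0, 0.5, -1.0]])   # one token, vocabulary of 3
targets = torch.tensor([0])
eps = 0.1

logits = F.log_softmax(outputs, dim=-1)
nll = F.nll_loss(logits, targets, reduction='none').mean()
loss = nll * (1 - eps) - logits.mean() * eps
print(nll.item(), loss.item())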
|
RegularizedBN
|
RegularizedBN-main/fairseq/criterions/wav2vec_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.logging.meters import safe_round
@register_criterion('wav2vec')
class Wav2vecCriterion(FairseqCriterion):
def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
super().__init__(task)
self.infonce = infonce
self.loss_weights = None if loss_weights is None else eval(loss_weights)
self.log_keys = [] if log_keys is None else eval(log_keys)
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--infonce', action='store_true',
help='if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)')
parser.add_argument('--loss-weights', type=str, default=None,
help='weights for additional loss terms (not first one)')
parser.add_argument('--log-keys', type=str, default=None,
help='output keys to log')
def forward(self, model, sample, reduce=True, log_pred=False):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
logits = model.get_logits(net_output).float()
target = model.get_targets(sample, net_output)
weights = None
if hasattr(model, 'get_target_weights') and not self.infonce:
weights = model.get_target_weights(target, net_output)
if torch.is_tensor(weights):
weights = weights.float()
losses = []
if self.infonce:
loss = F.cross_entropy(logits, target, reduction="sum" if reduce else "none",)
else:
loss = F.binary_cross_entropy_with_logits(logits, target.float(), weights, reduction="sum" if reduce else "none",)
sample_size = target.numel() if self.infonce else target.long().sum().item()
losses.append(loss.detach().clone())
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(self.loss_weights), f'{len(extra_losses)}, {len(self.loss_weights)}'
for p, coef in zip(extra_losses, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
losses.append(p)
logging_output = {
'loss': loss.item() if reduce else loss,
'ntokens': sample_size,
'nsentences': sample['id'].numel(),
'sample_size': sample_size,
}
for lk in self.log_keys:
if lk in net_output:
logging_output[lk] = float((net_output[lk]))
if len(losses) > 1:
for i, l in enumerate(losses):
logging_output[f'loss_{i}'] = l.item()
if self.infonce:
with torch.no_grad():
if logits.numel() == 0:
corr = 0
count = 0
else:
assert logits.dim() > 1, logits.shape
max = logits.argmax(-1) == 0
min = logits.argmin(-1) == 0
both = max & min
corr = max.long().sum().item() - both.long().sum().item()
count = max.numel()
logging_output["correct"] = corr
logging_output["count"] = count
if log_pred:
logging_output['logits'] = logits.cpu().numpy()
logging_output['target'] = target.cpu().numpy()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs))
nsentences = utils.item(sum(log.get('nsentences', 0) for log in logging_outputs))
sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('ntokens', ntokens)
metrics.log_scalar('nsentences', nsentences)
correct = sum(log.get("correct", 0) for log in logging_outputs)
metrics.log_scalar("_correct", correct)
total = sum(log.get("count", 0) for log in logging_outputs)
metrics.log_scalar("_total", total)
if total > 0:
metrics.log_derived(
"accuracy",
lambda meters: safe_round(meters["_correct"].sum / meters["_total"].sum, 5)
if meters["_total"].sum > 0
else float("nan"),
)
builtin_keys = {'loss', 'ntokens', 'nsentences', 'sample_size', 'correct', 'count'}
for k in logging_outputs[0]:
if k not in builtin_keys:
val = sum(log.get(k, 0) for log in logging_outputs) / len(logging_outputs)
if k.startswith('loss'):
metrics.log_scalar(k, val / sample_size / math.log(2), sample_size)
else:
metrics.log_scalar(k, val, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
| 6,437 | 39.490566 | 126 |
py
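In the InfoNCE branch above the positive candidate sits at index 0 of every logits row, so the loss is plain cross-entropy against an all-zeros target and accuracy counts rows whose argmax is 0 (the criterion additionally discounts degenerate rows where argmax and argmin coincide). A toy illustration with made-up logits:

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.1, -1.0],   # positive wins   -> counted correct
                       [0.2, 1.5, 0.3]])   # distractor wins -> incorrect
target = torch.zeros(logits.size(0), dtype=torch.long)

loss = F.cross_entropy(logits, target, reduction='sum')
correct = (logits.argmax(-1) == 0).long().sum().item()
print(loss.item(), correct)  # correct == 1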
|
RegularizedBN
|
RegularizedBN-main/fairseq/criterions/legacy_masked_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
def compute_cross_entropy_loss(logits, targets, ignore_index=-100):
"""
Function to compute the cross entropy loss. The default value of
ignore_index is the same as the default value for F.cross_entropy in
pytorch.
"""
assert logits.size(0) == targets.size(-1), \
"Logits and Targets tensor shapes don't match up"
loss = F.nll_loss(
F.log_softmax(logits, -1, dtype=torch.float32),
targets,
reduction="sum",
ignore_index=ignore_index,
)
return loss
@register_criterion('legacy_masked_lm_loss')
class LegacyMaskedLmLoss(FairseqCriterion):
"""
Implementation for the loss used in masked language model (MLM) training.
This optionally also computes the next sentence prediction (NSP) loss and
adds it to the overall loss based on the specified args. There are three
cases to consider:
1) Generic MLM training without NSP loss. In this case sentence_targets
and sentence_logits are both None.
2) BERT training without NSP loss. In this case sentence_targets is
not None but sentence_logits is None and we should not be computing
a sentence level loss.
3) BERT training with NSP loss. In this case both sentence_targets and
sentence_logits are not None and we should be computing a sentence
level loss. The weight of the sentence level loss is specified as
an argument.
"""
def __init__(self, task, masked_lm_only, nsp_loss_weight):
super().__init__(task)
self.masked_lm_only = masked_lm_only
self.nsp_loss_weight = nsp_loss_weight
@staticmethod
def add_args(parser):
"""Args for MaskedLM Loss"""
# Default for masked_lm_only is False so as to not break BERT training
parser.add_argument('--masked-lm-only', default=False,
action='store_true', help='compute MLM loss only')
parser.add_argument('--nsp-loss-weight', default=1.0, type=float,
help='weight for next sentence prediction'
' loss (default 1)')
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
lm_logits, output_metadata = model(**sample["net_input"])
# reshape lm_logits from (N,T,C) to (N*T,C)
lm_logits = lm_logits.view(-1, lm_logits.size(-1))
lm_targets = sample['lm_target'].view(-1)
lm_loss = compute_cross_entropy_loss(
lm_logits, lm_targets, self.padding_idx)
# compute the number of tokens for which loss is computed. This is used
# to normalize the loss
ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
loss = lm_loss / ntokens
nsentences = sample['nsentences']
# nsentences = 0
# Compute sentence loss if masked_lm_only is False
sentence_loss = None
if not self.masked_lm_only:
sentence_logits = output_metadata['sentence_logits']
sentence_targets = sample['sentence_target'].view(-1)
# This needs to be recomputed due to some differences between
# TokenBlock and BlockPair dataset. This can be resolved with a
# refactor of BERTModel which we will do in the future.
# TODO: Remove this after refactor of BERTModel
nsentences = sentence_targets.size(0)
# Check for logits being none which can happen when remove_heads
# is set to true in the BERT model. Ideally we should set
# masked_lm_only to true in this case, but that requires some
# refactor in the BERT model.
if sentence_logits is not None:
sentence_loss = compute_cross_entropy_loss(
sentence_logits, sentence_targets)
loss += self.nsp_loss_weight * (sentence_loss / nsentences)
# NOTE: as we are summing up per token mlm loss and per sentence nsp loss
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'lm_loss': utils.item(lm_loss.data) if reduce else lm_loss.data,
# sentence loss is not always computed
'sentence_loss': (
(
utils.item(sentence_loss.data) if reduce
else sentence_loss.data
) if sentence_loss is not None else 0.0
),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
lm_loss_sum = sum(log.get('lm_loss', 0) for log in logging_outputs)
sentence_loss_sum = sum(
log.get('sentence_loss', 0) for log in logging_outputs)
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
agg_loss = sum(log.get('loss', 0) for log in logging_outputs)
metrics.log_scalar('loss', agg_loss / sample_size / math.log(2) if sample_size > 0 else 0., sample_size, round=3)
metrics.log_scalar('lm_loss', lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0., ntokens, round=3)
metrics.log_scalar('sentence_loss', sentence_loss_sum / nsentences / math.log(2) if nsentences > 0 else 0., nsentences, round=3)
metrics.log_scalar('nll_loss', lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0., ntokens, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 6,769 | 43.248366 | 136 |
py
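compute_cross_entropy_loss above is a sum-reduced NLL over float32 log-probabilities with an ignore index, and the criterion then divides by the number of non-pad tokens. A toy call with hypothetical shapes and a padding index of -100:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)              # 4 positions, vocabulary of 10
targets = torch.tensor([3, 7, -100, 1])  # -100 marks a padded position

loss = F.nll_loss(
    F.log_softmax(logits, -1, dtype=torch.float32),
    targets,
    reduction="sum",
    ignore_index=-100,
)
ntokens = (targets != -100).sum()
print((loss / ntokens).item())           # per-token MLM loss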
|
RegularizedBN
|
RegularizedBN-main/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from .label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
@register_criterion('label_smoothed_cross_entropy_with_alignment')
class LabelSmoothedCrossEntropyCriterionWithAlignment(LabelSmoothedCrossEntropyCriterion):
def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda):
super().__init__(task, sentence_avg, label_smoothing)
self.alignment_lambda = alignment_lambda
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
LabelSmoothedCrossEntropyCriterion.add_args(parser)
parser.add_argument('--alignment-lambda', default=0.05, type=float, metavar='D',
help='weight for the alignment loss')
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
alignment_loss = None
# Compute alignment loss only for training set and non dummy batches.
if 'alignments' in sample and sample['alignments'] is not None:
alignment_loss = self.compute_alignment_loss(sample, net_output)
if alignment_loss is not None:
logging_output['alignment_loss'] = utils.item(alignment_loss.data)
loss += self.alignment_lambda * alignment_loss
return loss, sample_size, logging_output
def compute_alignment_loss(self, sample, net_output):
attn_prob = net_output[1]['attn'][0]
bsz, tgt_sz, src_sz = attn_prob.shape
attn = attn_prob.view(bsz * tgt_sz, src_sz)
align = sample['alignments']
align_weights = sample['align_weights'].float()
if len(align) > 0:
# Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to
# the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing.
loss = -((attn[align[:, 1][:, None], align[:, 0][:, None]]).log() * align_weights[:, None]).sum()
else:
return None
return loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))
nll_loss_sum = utils.item(sum(log.get('nll_loss', 0) for log in logging_outputs))
alignment_loss_sum = utils.item(sum(log.get('alignment_loss', 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs))
sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('nll_loss', nll_loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_scalar('alignment_loss', alignment_loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 4,393 | 43.836735 | 114 |
py
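compute_alignment_loss above flattens the attention matrix to (bsz * tgt_len, src_len), gathers it at the supervised (src, tgt) index pairs, and sums their weighted negative log-probabilities. A small numeric sketch with a toy attention matrix and alignments:

import torch

bsz, tgt_sz, src_sz = 1, 2, 3
attn_prob = torch.softmax(torch.randn(bsz, tgt_sz, src_sz), dim=-1)
attn = attn_prob.view(bsz * tgt_sz, src_sz)

# each row of 'align' is a (src_idx, tgt_idx) pair; the weights would normally
# be 1 / frequency of the target index
align = torch.tensor([[0, 0], [2, 1]])
align_weights = torch.tensor([1.0, 1.0])

loss = -((attn[align[:, 1][:, None], align[:, 0][:, None]]).log()
         * align_weights[:, None]).sum()
print(loss.item())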
|
RegularizedBN
|
RegularizedBN-main/fairseq/criterions/adaptive_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('adaptive_loss')
class AdaptiveLoss(FairseqCriterion):
"""This is an implementation of the loss function accompanying the adaptive softmax approximation for
graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs"
(http://arxiv.org/abs/1609.04309)."""
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
@classmethod
def build_criterion(cls, args, task):
if getattr(args, 'ddp_backend', None) == 'c10d':
raise Exception(
'AdaptiveLoss is not compatible with the c10d '
'version of DistributedDataParallel. Please use '
'`--ddp-backend=no_c10d` instead.'
)
return cls(task, args.sentence_avg)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert hasattr(model.decoder, 'adaptive_softmax') and model.decoder.adaptive_softmax is not None
adaptive_softmax = model.decoder.adaptive_softmax
net_output = model(**sample['net_input'])
orig_target = model.get_targets(sample, net_output)
nsentences = orig_target.size(0)
orig_target = orig_target.view(-1)
bsz = orig_target.size(0)
logits, target = adaptive_softmax(net_output[0], orig_target)
assert len(target) == len(logits)
loss = net_output[0].new(1 if reduce else bsz).zero_()
for i in range(len(target)):
if target[i] is not None:
assert (target[i].min() >= 0 and target[i].max() <= logits[i].size(1))
loss += F.cross_entropy(
logits[i],
target[i],
ignore_index=self.padding_idx,
reduction='sum' if reduce else 'none',
)
orig = utils.strip_pad(orig_target, self.padding_idx)
ntokens = orig.numel()
sample_size = sample['target'].size(0) if self.sentence_avg else ntokens
logging_output = {
'loss': loss.data,
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs))
sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
if sample_size != ntokens:
metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
else:
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 3,981 | 38.039216 | 105 |
py
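AdaptiveLoss above accumulates one cross-entropy term per adaptive-softmax cluster. A stripped-down sketch of that accumulation over hypothetical per-cluster (logits, target) pairs:

import torch
import torch.nn.functional as F

padding_idx = 1
clusters = [                                            # made-up cluster outputs
    (torch.randn(6, 8), torch.randint(0, 8, (6,))),     # head cluster
    (torch.randn(2, 20), torch.randint(0, 20, (2,))),   # one tail cluster
]

loss = torch.zeros(1)
for logits, target in clusters:
    if target is not None:
        loss += F.cross_entropy(
            logits, target, ignore_index=padding_idx, reduction="sum"
        )
print(loss.item())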
|