repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
sign-topic
|
sign-topic-main/examples/latent_depth/latent_depth_src/models/latent_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import torch.nn as nn
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.transformer import TransformerDecoder, TransformerEncoder
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
from torch import Tensor
from ..modules.latent_layers import LayerSelect
class LatentTransformerEncoder(TransformerEncoder):
"""Latent depth (https://arxiv.org/abs/2009.13102) implemented in
TransformerEncoder.
"""
def __init__(self, args, dictionary, embed_tokens, num_logits=1):
self.num_logits = num_logits
self.num_layers = args.encoder_layers
super().__init__(args, dictionary, embed_tokens)
self.layer_select = LayerSelect(
num_layers=self.num_layers,
num_logits=self.num_logits,
soft_select=getattr(args, "soft_select", False),
sampling_tau=getattr(args, "sampling_tau", 5.),
)
self.lang_idx = None
self.layers = nn.ModuleList(
[self._build_encoder_layer(args, idx) for idx in range(args.encoder_layers)]
)
def set_lang_idx(self, lang_idx):
self.lang_idx = lang_idx
def _build_encoder_layer(self, args, idx=None):
return LatentTransformerEncoderLayer(args, idx, layer_select=self.layer_select)
def forward(self, src_tokens, src_lengths, return_all_hiddens: bool = False):
self.layer_select.sample(self.lang_idx)
return super().forward(src_tokens, src_lengths, return_all_hiddens)
class LatentTransformerEncoderLayer(TransformerEncoderLayer):
"""Encoder layer with each (non_residual) block weighted by samples of Bernouli
or Gumbel Signmoid samples.
Args:
args (argparse.Namespace): parsed command-line arguments from standard
TransformerEncoderLayer.
idx (int): layer index (used to retrieve samples).
layer_select (LayerSelect, optional): instance of LayerSelect module with logits
parameters and sampling method.
"""
def __init__(self, args, idx, layer_select=None):
super().__init__(args)
self.idx = idx
self.layer_select = layer_select
def residual_connection(self, x, residual):
return residual + x * self.layer_select(self.idx)
class LatentTransformerDecoder(TransformerDecoder):
"""Latent depth (https://arxiv.org/abs/2009.13102) implemented in
TransformerDecoder.
"""
def __init__(
self, args, dictionary, embed_tokens, no_encoder_attn=False, num_logits=1
):
self.num_logits = num_logits
self.num_layers = args.decoder_layers
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.layer_select = LayerSelect(
num_layers=self.num_layers,
num_logits=self.num_logits,
soft_select=getattr(args, "soft_select", False),
sampling_tau=getattr(args, "sampling_tau", 5.),
)
self.lang_idx = None
self.layers = nn.ModuleList(
[
self._build_decoder_layer(args, no_encoder_attn, idx)
for idx in range(args.decoder_layers)
]
)
def set_lang_idx(self, lang_idx):
self.lang_idx = lang_idx
def _build_decoder_layer(self, args, no_encoder_attn=False, idx=None):
return LatentTransformerDecoderLayer(
args, idx, layer_select=self.layer_select, no_encoder_attn=no_encoder_attn
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
self.layer_select.sample(self.lang_idx)
return super().forward(
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
features_only=features_only,
            alignment_layer=alignment_layer,
            alignment_heads=alignment_heads,
            src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
class LatentTransformerDecoderLayer(TransformerDecoderLayer):
"""Decoder layer with each (non_residual) block weighted by samples of Bernouli
or Gumbel Signmoid samples.
Args:
args (argparse.Namespace): parsed command-line arguments from standard
TransformerDecoderLayer.
idx (int): layer index (used to retrieve samples).
layer_select (LayerSelect, optional): instance of LayerSelect module with logits
parameters and sampling method.
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
args,
idx,
layer_select=None,
no_encoder_attn=False,
add_bias_kv=False,
add_zero_attn=False,
):
super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn)
self.idx = idx
self.layer_select = layer_select
def residual_connection(self, x, residual):
return residual + x * self.layer_select(self.idx)
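# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.  Each
# layer's residual_connection() above scales the block output by a per-layer
# weight that LayerSelect samples once per forward pass (forward() calls
# self.layer_select.sample()).  The commented lines below sketch that gating
# with a plain Gumbel-Sigmoid relaxation; the tensor names are made up and
# this is not the fairseq LayerSelect implementation.
#
#     import torch
#     logits = torch.zeros(6)                          # one logit per layer
#     u = torch.rand(6).clamp(1e-6, 1 - 1e-6)
#     noise = torch.log(u) - torch.log(1 - u)          # logistic (Gumbel-Sigmoid) noise
#     gates = torch.sigmoid((logits + noise) / 5.0)    # soft Bernoulli gate per layer
#     x, residual = torch.randn(4, 8), torch.randn(4, 8)
#     out = residual + x * gates[3]                    # cf. residual_connection(idx=3)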
| 5,584 | 34.573248 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/latent_depth/latent_depth_src/models/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/latent_depth/latent_depth_src/loss/latent_depth.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch.nn.modules.loss import _Loss
class LatentLayersKLLoss(_Loss):
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, layer_samples, lang_idx, update_num, sample_size):
prior = self.args.prior
samples = layer_samples[lang_idx]
eps = 1e-7
if prior == "uniform":
# uniform prior
kl_loss = (samples * (torch.log(samples + eps) - math.log(0.5))).sum(-1)
elif prior == "agged_posterior":
# aggregated posterior
y_t = torch.stack([x.detach() for x in layer_samples], dim=0)
agged_q = torch.sum(y_t, dim=0)
row_norm = agged_q.sum(-1)
normed_agg_q = agged_q / row_norm
kl_loss = (
samples * (torch.log(samples + eps) - torch.log(normed_agg_q + eps))
).sum(-1)
else:
raise NotImplementedError("The specified prior is not implemented.")
# normalized by number of layers
kl_loss /= layer_samples[0].size()[0]
kl_weight = min(
self.args.sparsity_weight,
(update_num - self.args.soft_update)
* self.args.sparsity_weight
/ self.args.anneal_updates,
)
kl_loss *= kl_weight * sample_size
return kl_loss
class LatentLayersSparsityLoss(_Loss):
def __init__(self, args):
super().__init__()
self.args = args
def is_valid(self, update_num):
if self.args.target_layers <= 0:
return False
return update_num > (self.args.soft_update + self.args.anneal_updates)
def forward(self, layer_samples_list, update_num, sample_size):
batch_loss = 0
share_loss = 0
global_sparsity_loss = 0
layer_samples = torch.stack(layer_samples_list, dim=0)
if (
self.args.target_layers > 0 or self.args.share_weight > 0
) and update_num > (self.args.soft_update + self.args.anneal_updates):
# anneal sparsity weight
if update_num < (self.args.anneal_updates + self.args.soft_update):
weight_anneal = 0
elif update_num < (2 * self.args.anneal_updates + self.args.soft_update):
weight_anneal = (
(update_num - self.args.soft_update - self.args.anneal_updates)
* self.args.share_weight
/ self.args.anneal_updates
)
else:
weight_anneal = 1
# compute ratio among languages
layer_utilization = torch.sum(layer_samples, dim=0)
layer_utilization /= layer_samples.size()[0]
if self.args.share_weight > 0:
# encouraging sharing across languages
share_loss = sum(
-1.0 * v * math.log(v) for v in layer_utilization if v > 0
)
batch_loss += (
weight_anneal * self.args.share_weight * sample_size * share_loss
)
if self.args.target_layers > 0:
                # compute the expected number of layers selected
                expected_layers = sum(layer_utilization)
                # compute l2 loss wrt target number of layers
                global_sparsity_loss = (expected_layers - self.args.target_layers) ** 2
batch_loss += (
weight_anneal
* self.args.share_weight
* sample_size
* global_sparsity_loss
)
return batch_loss
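# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.  A quick
# sanity check of LatentLayersKLLoss under the "uniform" prior: when every
# layer gate equals 0.5, the KL term is numerically zero (up to the 1e-7
# epsilon).  The Namespace below only carries the fields this loss reads and
# its values are made up for the demo.
if __name__ == "__main__":
    from argparse import Namespace

    demo_args = Namespace(
        prior="uniform", sparsity_weight=1.0, soft_update=0, anneal_updates=1
    )
    kl = LatentLayersKLLoss(demo_args)
    gates = [torch.full((6,), 0.5)]  # one language, six layers, all gates at 0.5
    loss = kl(gates, lang_idx=0, update_num=10, sample_size=1)
    print(float(loss))  # ~0.0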
| 3,802 | 37.03 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/latent_depth/latent_depth_src/loss/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/m2m_100/tokenizers/tokenize_thai.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from pythainlp import word_tokenize
for line in sys.stdin:
print(" ".join(word_tokenize(line.strip())))
| 323 | 22.142857 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/m2m_100/tokenizers/tokenize_indic.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Use: echo {text} | python tokenize_indic.py {language}
import sys
from indicnlp.normalize.indic_normalize import IndicNormalizerFactory
from indicnlp.tokenize.indic_tokenize import trivial_tokenize
factory = IndicNormalizerFactory()
normalizer = factory.get_normalizer(
sys.argv[1], remove_nuktas=False, nasals_mode="do_nothing"
)
for line in sys.stdin:
normalized_line = normalizer.normalize(line.strip())
tokenized_line = " ".join(trivial_tokenize(normalized_line, sys.argv[1]))
print(tokenized_line)
| 727 | 29.333333 | 77 |
py
|
sign-topic
|
sign-topic-main/examples/m2m_100/tokenizers/tokenize_zh.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import fileinput
import sacrebleu
for line in fileinput.input():
print(sacrebleu.tokenize_zh(line))
| 309 | 19.666667 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/m2m_100/process_data/remove_too_much_punc.py
|
import gzip
import argparse
from string import punctuation
def len_no_punc(s, punc):
return len([ch for ch in s if ch in punc])
def filter_overpunc(len_npunc, len_sen):
return len_npunc < 0.5*len_sen
def main(args):
punc = punctuation + "—|–"
print('Processing file {}'.format(args.input))
with gzip.open(args.input, 'rt', encoding=args.encoding) as tsv:
with open(args.bitext + '.' + args.src_lang, 'wt', encoding=args.encoding) as fsrc:
with open(args.bitext + '.' + args.tgt_lang, 'wt', encoding=args.encoding) as ftgt:
                # iterate over every line of the TSV; keep a pair only when
                # punctuation makes up less than half of both sides
                for line in tsv:
                    fields = line.split('\t')
                    src, tgt = fields[1], fields[2]
                    nchar_npunc_src = len_no_punc(src, punc)
                    nchar_npunc_tgt = len_no_punc(tgt, punc)
                    if filter_overpunc(nchar_npunc_src, len(src)) and filter_overpunc(nchar_npunc_tgt, len(tgt)):
                        fsrc.write(src.strip() + '\n')
                        ftgt.write(tgt.strip() + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True, type=str)
parser.add_argument('--encoding', default='utf-8', help='character encoding for input/output')
parser.add_argument('--bitext', type=str, required=True, help='language direction')
parser.add_argument('--src-lang', type=str, required=True, help='Source language')
parser.add_argument('--tgt-lang', type=str, required=True, help='Target language')
main(parser.parse_args())
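# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.  Despite
# its name, len_no_punc() counts the punctuation characters in a string, and
# filter_overpunc() keeps a sentence only when punctuation makes up less than
# half of it.  A quick interactive check:
#
#     >>> from string import punctuation
#     >>> punc = punctuation + "—|–"
#     >>> len_no_punc("Hello, world!", punc)
#     2
#     >>> filter_overpunc(2, len("Hello, world!"))
#     True
#     >>> filter_overpunc(len_no_punc("?!?!?!", punc), 6)
#     False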
| 1,541 | 40.675676 | 109 |
py
|
sign-topic
|
sign-topic-main/examples/m2m_100/process_data/dedup_data.py
|
import argparse
from collections import namedtuple
import os
DATADIR = "/path/to/train_data"
DEDUP_FROM_DIR = "/path/to/eval/data"
OUTPUT_DIR = "/path/to/output/data"
def main(args):
languages = set()
for language_directory in os.listdir(DATADIR):
if "_" in language_directory:
src, tgt = language_directory.split("_")
languages.add(LanguagePair(src=src, tgt=tgt))
data = existing_data()
train_languages = sorted(languages)
for language_pair in train_languages[args.start_index:args.start_index + args.size]:
print(language_pair)
dedup(language_pair, data)
LanguagePair = namedtuple("LanguagePair", ["src", "tgt"])
def existing_data():
data = set()
for file in os.listdir(DEDUP_FROM_DIR):
with open(os.path.join(DEDUP_FROM_DIR, file)) as f:
data |= set(f.readlines())
return data
def dedup(language_pair, data, verbose=True, output=True):
train_filenames = LanguagePair(
src=f"{DATADIR}/{language_pair.src}_{language_pair.tgt}/train.{language_pair.src}",
tgt=f"{DATADIR}/{language_pair.src}_{language_pair.tgt}/train.{language_pair.tgt}",
)
output_filenames = LanguagePair(
src=f"{OUTPUT_DIR}/train.dedup.{language_pair.src}-{language_pair.tgt}.{language_pair.src}",
tgt=f"{OUTPUT_DIR}/train.dedup.{language_pair.src}-{language_pair.tgt}.{language_pair.tgt}"
)
# If output exists, skip this pair. It has already been done.
if (os.path.exists(output_filenames.src) and
os.path.exists(output_filenames.tgt)):
if verbose:
print(f"{language_pair.src}-{language_pair.tgt} already done.")
return
if verbose:
print(f"{language_pair.src}-{language_pair.tgt} ready, will check dups.")
# If there is no output, no need to actually do the loop.
if not output:
return
if os.path.exists(train_filenames.src) and os.path.exists(train_filenames.tgt):
with open(train_filenames.src) as f:
train_source = f.readlines()
with open(train_filenames.tgt) as f:
train_target = f.readlines()
# do dedup
new_train_source = []
new_train_target = []
for i, train_line in enumerate(train_source):
if train_line not in data and train_target[i] not in data:
new_train_source.append(train_line)
new_train_target.append(train_target[i])
assert len(train_source) == len(train_target)
assert len(new_train_source) == len(new_train_target)
assert len(new_train_source) <= len(train_source)
with open(output_filenames.src, "w") as o:
for line in new_train_source:
o.write(line)
with open(output_filenames.tgt, "w") as o:
for line in new_train_target:
o.write(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start-index", required=True, type=int)
parser.add_argument("-n", "--size", required=True, type=int)
main(parser.parse_args())
| 3,132 | 33.054348 | 100 |
py
|
sign-topic
|
sign-topic-main/examples/m2m_100/process_data/clean_histogram.py
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--src', type=str, help='Source language')
parser.add_argument('--tgt', type=str, help='Target language')
parser.add_argument('--src-file', type=str, help='Input source file')
parser.add_argument('--tgt-file', type=str, help='Input target file')
parser.add_argument('--src-output-file', type=str, help='Output source file')
parser.add_argument('--tgt-output-file', type=str, help='Output target file')
parser.add_argument('--threshold', type=float, default=0.5, help='Threshold')
parser.add_argument('--threshold-character', type=str, default=']', help='Threshold character')
parser.add_argument('--histograms', type=str, help='Path to histograms')
args = parser.parse_args()
def read_hist(f):
ch = []
for line in f:
c = line[0]
if c == args.threshold_character:
break
ch.append(c)
return ch
with(open("{}/{}".format(args.histograms, args.src), 'r', encoding='utf8')) as f:
ch1 = read_hist(f)
with(open("{}/{}".format(args.histograms, args.tgt), 'r', encoding='utf8')) as f:
ch2 = read_hist(f)
print("Accepted characters for {}: {}".format(args.src, ch1))
print("Accepted characters for {}: {}".format(args.tgt, ch2))
with open(args.src_file, 'r', encoding='utf8') as fs1, open(args.tgt_file, 'r', encoding='utf8') as fs2, open(args.src_output_file, 'w', encoding='utf8') as fos1, open(args.tgt_output_file, 'w', encoding='utf8') as fos2:
ls1 = fs1.readline()
ls2 = fs2.readline()
while ls1 or ls2:
cnt1 = len([c for c in ls1.strip() if c in ch1])
cnt2 = len([c for c in ls2.strip() if c in ch2])
if cnt1 / len(ls1) > args.threshold and cnt2 / len(ls2) > args.threshold:
fos1.write(ls1)
fos2.write(ls2)
else:
print("{} {} {} \n{} {} {}".format(args.src, cnt1 / len(ls1), ls1.strip(), args.tgt, cnt2 / len(ls2), ls2.strip()))
ls1 = fs1.readline()
ls2 = fs2.readline()
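# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.
# read_hist() takes the leading character of every histogram line until it
# meets --threshold-character (']' by default), and a sentence pair is written
# out only if, on both sides, more than --threshold of its characters belong
# to the accepted set.  Example invocation with hypothetical paths:
#
#     python clean_histogram.py --src en --tgt de \
#         --src-file corpus.en --tgt-file corpus.de \
#         --src-output-file corpus.clean.en --tgt-output-file corpus.clean.de \
#         --histograms /path/to/histograms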
| 2,010 | 37.673077 | 220 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/measure_teacher_quality.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os.path as op
import re
from tabulate import tabulate
from collections import Counter
def comp_purity(p_xy, axis):
max_p = p_xy.max(axis=axis)
marg_p = p_xy.sum(axis=axis)
indv_pur = max_p / marg_p
aggr_pur = max_p.sum()
return indv_pur, aggr_pur
def comp_entropy(p):
return (-p * np.log(p + 1e-8)).sum()
def comp_norm_mutual_info(p_xy):
p_x = p_xy.sum(axis=1, keepdims=True)
p_y = p_xy.sum(axis=0, keepdims=True)
pmi = np.log(p_xy / np.matmul(p_x, p_y) + 1e-8)
mi = (p_xy * pmi).sum()
h_x = comp_entropy(p_x)
h_y = comp_entropy(p_y)
return mi, mi / h_x, mi / h_y, h_x, h_y
def pad(labs, n):
if n == 0:
return np.array(labs)
return np.concatenate([[labs[0]] * n, labs, [labs[-1]] * n])
def comp_avg_seg_dur(labs_list):
n_frms = 0
n_segs = 0
for labs in labs_list:
labs = np.array(labs)
edges = np.zeros(len(labs)).astype(bool)
edges[0] = True
edges[1:] = labs[1:] != labs[:-1]
n_frms += len(edges)
n_segs += edges.astype(int).sum()
return n_frms / n_segs
def comp_joint_prob(uid2refs, uid2hyps):
"""
Args:
pad: padding for spliced-feature derived labels
"""
cnts = Counter()
skipped = []
abs_frmdiff = 0
for uid in uid2refs:
if uid not in uid2hyps:
skipped.append(uid)
continue
refs = uid2refs[uid]
hyps = uid2hyps[uid]
abs_frmdiff += abs(len(refs) - len(hyps))
min_len = min(len(refs), len(hyps))
refs = refs[:min_len]
hyps = hyps[:min_len]
cnts.update(zip(refs, hyps))
tot = sum(cnts.values())
ref_set = sorted({ref for ref, _ in cnts.keys()})
hyp_set = sorted({hyp for _, hyp in cnts.keys()})
ref2pid = dict(zip(ref_set, range(len(ref_set))))
hyp2lid = dict(zip(hyp_set, range(len(hyp_set))))
# print(hyp_set)
p_xy = np.zeros((len(ref2pid), len(hyp2lid)), dtype=float)
for (ref, hyp), cnt in cnts.items():
p_xy[ref2pid[ref], hyp2lid[hyp]] = cnt
p_xy /= p_xy.sum()
return p_xy, ref2pid, hyp2lid, tot, abs_frmdiff, skipped
def read_phn(tsv_path, rm_stress=True):
uid2phns = {}
with open(tsv_path) as f:
for line in f:
uid, phns = line.rstrip().split("\t")
phns = phns.split(",")
if rm_stress:
phns = [re.sub("[0-9]", "", phn) for phn in phns]
uid2phns[uid] = phns
return uid2phns
def read_lab(tsv_path, lab_path, pad_len=0, upsample=1):
"""
tsv is needed to retrieve the uids for the labels
"""
with open(tsv_path) as f:
f.readline()
uids = [op.splitext(op.basename(line.rstrip().split()[0]))[0] for line in f]
with open(lab_path) as f:
labs_list = [pad(line.rstrip().split(), pad_len).repeat(upsample) for line in f]
assert len(uids) == len(labs_list)
return dict(zip(uids, labs_list))
def main_lab_lab(
tsv_dir,
lab_dir,
lab_name,
lab_sets,
ref_dir,
ref_name,
pad_len=0,
upsample=1,
verbose=False,
):
# assume tsv_dir is the same for both the reference and the hypotheses
tsv_dir = lab_dir if tsv_dir is None else tsv_dir
uid2refs = {}
for s in lab_sets:
uid2refs.update(read_lab(f"{tsv_dir}/{s}.tsv", f"{ref_dir}/{s}.{ref_name}"))
uid2hyps = {}
for s in lab_sets:
uid2hyps.update(
read_lab(
f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample
)
)
_main(uid2refs, uid2hyps, verbose)
def main_phn_lab(
tsv_dir,
lab_dir,
lab_name,
lab_sets,
phn_dir,
phn_sets,
pad_len=0,
upsample=1,
verbose=False,
):
uid2refs = {}
for s in phn_sets:
uid2refs.update(read_phn(f"{phn_dir}/{s}.tsv"))
uid2hyps = {}
tsv_dir = lab_dir if tsv_dir is None else tsv_dir
for s in lab_sets:
uid2hyps.update(
read_lab(
f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample
)
)
_main(uid2refs, uid2hyps, verbose)
def _main(uid2refs, uid2hyps, verbose):
(p_xy, ref2pid, hyp2lid, tot, frmdiff, skipped) = comp_joint_prob(
uid2refs, uid2hyps
)
ref_pur_by_hyp, ref_pur = comp_purity(p_xy, axis=0)
hyp_pur_by_ref, hyp_pur = comp_purity(p_xy, axis=1)
(mi, mi_norm_by_ref, mi_norm_by_hyp, h_ref, h_hyp) = comp_norm_mutual_info(p_xy)
outputs = {
"ref pur": ref_pur,
"hyp pur": hyp_pur,
"H(ref)": h_ref,
"H(hyp)": h_hyp,
"MI": mi,
"MI/H(ref)": mi_norm_by_ref,
"ref segL": comp_avg_seg_dur(uid2refs.values()),
"hyp segL": comp_avg_seg_dur(uid2hyps.values()),
"p_xy shape": p_xy.shape,
"frm tot": tot,
"frm diff": frmdiff,
"utt tot": len(uid2refs),
"utt miss": len(skipped),
}
print(tabulate([outputs.values()], outputs.keys(), floatfmt=".4f"))
if __name__ == "__main__":
"""
compute quality of labels with respect to phone or another labels if set
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("lab_dir")
parser.add_argument("lab_name")
parser.add_argument("--lab_sets", default=["valid"], type=str, nargs="+")
parser.add_argument(
"--phn_dir",
default="/checkpoint/wnhsu/data/librispeech/960h/fa/raw_phn/phone_frame_align_v1",
)
parser.add_argument(
"--phn_sets", default=["dev-clean", "dev-other"], type=str, nargs="+"
)
parser.add_argument("--pad_len", default=0, type=int, help="padding for hypotheses")
parser.add_argument(
"--upsample", default=1, type=int, help="upsample factor for hypotheses"
)
parser.add_argument("--ref_lab_dir", default="")
parser.add_argument("--ref_lab_name", default="")
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()
if args.ref_lab_dir and args.ref_lab_name:
main_lab_lab(
args.tsv_dir,
args.lab_dir,
args.lab_name,
args.lab_sets,
args.ref_lab_dir,
args.ref_lab_name,
args.pad_len,
args.upsample,
args.verbose,
)
else:
main_phn_lab(
args.tsv_dir,
args.lab_dir,
args.lab_name,
args.lab_sets,
args.phn_dir,
args.phn_sets,
args.pad_len,
args.upsample,
args.verbose,
)
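# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.  A small
# worked example of the metrics above on a hand-made 2x2 joint distribution
# (values are approximate and in nats):
#
#     >>> import numpy as np
#     >>> p_xy = np.array([[0.4, 0.1], [0.1, 0.4]])
#     >>> comp_purity(p_xy, axis=0)[1]   # aggregate "ref pur"  -> 0.8
#     >>> comp_purity(p_xy, axis=1)[1]   # aggregate "hyp pur"  -> 0.8
#     >>> mi, mi_by_ref, mi_by_hyp, h_ref, h_hyp = comp_norm_mutual_info(p_xy)
#     >>> float(mi), float(h_ref)        # -> (~0.193, ~0.693), so MI/H(ref) ~ 0.28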
| 6,826 | 27.210744 | 90 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/update_ckpt.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
src_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2.pt"
ref_ckpt = "/checkpoint/wnhsu/w2v/hubert_icassp_oss_v3/iter2_km100-400k-grp-L6/oss.km500_p0_1_s334.pmw1_0.puw0_0.grpnorm.ml10.mp0_8.untie.mxsz250000.ufreq1.maxtok1400000.MU100k.s1337.ngpu32/checkpoint_last.pt"
new_ckpt = "/checkpoint/wnhsu/w2v/archived/hubert_base_ls960_it2_updated.pt"
def update_state(state):
state["model"]["label_embs_concat"] = state["model"].pop("label_embs")
state["args"].task = "hubert_pretraining"
state["args"].labels = f"['{state['args'].labels}']"
return state
src_state = torch.load(src_ckpt)
src_state = update_state(src_state)
torch.save(src_state, new_ckpt)
| 873 | 37 | 209 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/simple_kmeans/dump_hubert_feature_s2t.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import io
import logging
import os
import os.path as op
import sys
from dump_hubert_feature import HubertFeatureReader
from feature_utils import get_shard_range, dump_feature
from fairseq.data.audio.audio_utils import get_waveform
from fairseq.data.audio.speech_to_text_dataset import (
read_from_uncompressed_zip,
)
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_hubert_feature_s2t")
class HubertFeatureReaderS2T(HubertFeatureReader):
def read_audio(self, path, ref_len=None):
path, *extra = path.split(":")
assert len(extra) == 2
assert path.endswith(".zip")
data = read_from_uncompressed_zip(path, int(extra[0]), int(extra[1]))
f = io.BytesIO(data)
wav, sr = get_waveform(f)
assert sr == self.task.cfg.sample_rate, sr
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_path_iterator(root, tsv, nshard, rank):
with open(tsv) as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
subpaths = [op.join(root, e["audio"]) for e in reader]
start, end = get_shard_range(len(subpaths), nshard, rank)
subpaths = subpaths[start:end]
def iterate():
for subpath in subpaths:
yield op.join(root, subpath), None
return iterate, len(subpaths)
def main(
root, tsv_path, ckpt_path, layer, nshard, rank, feat_dir, split, max_chunk
):
reader = HubertFeatureReaderS2T(ckpt_path, layer, max_chunk)
generator, num = get_path_iterator(root, tsv_path, nshard, rank)
dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("root")
parser.add_argument("tsv_path")
parser.add_argument("ckpt_path")
parser.add_argument("layer", type=int)
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("split")
parser.add_argument("--max_chunk", type=int, default=1600000)
args = parser.parse_args()
logger.info(args)
main(**vars(args))
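# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.  Unlike the
# plain HubertFeatureReader, this variant expects each "audio" field of the TSV
# to point into an uncompressed ZIP using the convention that read_audio()
# asserts above, e.g. (hypothetical values):
#
#     entry = "audio.zip:1024:40000"            # <archive>.zip:<offset>:<length>
#     archive, offset, length = entry.split(":")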
| 2,859 | 29.752688 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/simple_kmeans/dump_km_label.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
import joblib
import torch
import tqdm
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_km_label")
class ApplyKmeans(object):
def __init__(self, km_path):
self.km_model = joblib.load(km_path)
self.C_np = self.km_model.cluster_centers_.transpose()
self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True)
self.C = torch.from_numpy(self.C_np)
self.Cnorm = torch.from_numpy(self.Cnorm_np)
if torch.cuda.is_available():
self.C = self.C.cuda()
self.Cnorm = self.Cnorm.cuda()
def __call__(self, x):
if isinstance(x, torch.Tensor):
dist = (
x.pow(2).sum(1, keepdim=True)
- 2 * torch.matmul(x, self.C)
+ self.Cnorm
)
return dist.argmin(dim=1).cpu().numpy()
else:
dist = (
(x ** 2).sum(1, keepdims=True)
- 2 * np.matmul(x, self.C_np)
+ self.Cnorm_np
)
return np.argmin(dist, axis=1)
def get_feat_iterator(feat_dir, split, nshard, rank):
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
with open(leng_path, "r") as f:
lengs = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengs[:-1]).tolist()
def iterate():
feat = np.load(feat_path, mmap_mode="r")
assert feat.shape[0] == (offsets[-1] + lengs[-1])
for offset, leng in zip(offsets, lengs):
yield feat[offset: offset + leng]
return iterate, len(lengs)
def dump_label(feat_dir, split, km_path, nshard, rank, lab_dir):
apply_kmeans = ApplyKmeans(km_path)
generator, num = get_feat_iterator(feat_dir, split, nshard, rank)
iterator = generator()
lab_path = f"{lab_dir}/{split}_{rank}_{nshard}.km"
os.makedirs(lab_dir, exist_ok=True)
with open(lab_path, "w") as f:
for feat in tqdm.tqdm(iterator, total=num):
# feat = torch.from_numpy(feat).cuda()
lab = apply_kmeans(feat).tolist()
f.write(" ".join(map(str, lab)) + "\n")
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("feat_dir")
parser.add_argument("split")
parser.add_argument("km_path")
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("lab_dir")
args = parser.parse_args()
logging.info(str(args))
dump_label(**vars(args))
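# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.
# ApplyKmeans assigns each frame to the centroid minimising the expanded
# squared distance ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2, with centroids
# stored as a (dim, n_clusters) matrix.  A quick numpy check of that expansion
# against a brute-force computation:
#
#     >>> import numpy as np
#     >>> rng = np.random.default_rng(0)
#     >>> x, C = rng.normal(size=(5, 4)), rng.normal(size=(4, 3))
#     >>> Cnorm = (C ** 2).sum(0, keepdims=True)
#     >>> dist = (x ** 2).sum(1, keepdims=True) - 2 * x @ C + Cnorm
#     >>> np.allclose(dist, ((x[:, None, :] - C.T[None, :, :]) ** 2).sum(-1))
#     True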
| 3,008 | 29.393939 | 69 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/simple_kmeans/dump_mfcc_feature.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import soundfile as sf
import torch
import torchaudio
from feature_utils import get_path_iterator, dump_feature
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_mfcc_feature")
class MfccFeatureReader(object):
def __init__(self, sample_rate):
self.sample_rate = sample_rate
def read_audio(self, path, ref_len=None):
wav, sr = sf.read(path)
assert sr == self.sample_rate, sr
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, path, ref_len=None):
x = self.read_audio(path, ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float()
x = x.view(1, -1)
mfccs = torchaudio.compliance.kaldi.mfcc(
waveform=x,
sample_frequency=self.sample_rate,
use_energy=False,
) # (time, freq)
mfccs = mfccs.transpose(0, 1) # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
            concat = concat.transpose(0, 1).contiguous()  # (time, freq)
return concat
def main(tsv_dir, split, nshard, rank, feat_dir, sample_rate):
reader = MfccFeatureReader(sample_rate)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--sample_rate", type=int, default=16000)
args = parser.parse_args()
logger.info(args)
main(**vars(args))
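# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.
# get_feats() stacks 13 MFCCs with their deltas and delta-deltas, so every
# dumped feature matrix has shape (num_frames, 39).  Typical invocation, with
# hypothetical paths and 100 shards:
#
#     python dump_mfcc_feature.py /path/to/tsv_dir train 100 0 /path/to/feat_dir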
| 2,491 | 30.544304 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/simple_kmeans/feature_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import tqdm
from npy_append_array import NpyAppendArray
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("feature_utils")
def get_shard_range(tot, nshard, rank):
    assert rank < nshard and rank >= 0, f"invalid rank/nshard {rank}/{nshard}"
start = round(tot / nshard * rank)
end = round(tot / nshard * (rank + 1))
assert start < end, f"start={start}, end={end}"
logger.info(
f"rank {rank} of {nshard}, process {end-start} "
f"({start}-{end}) out of {tot}"
)
return start, end
def get_path_iterator(tsv, nshard, rank):
with open(tsv, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
start, end = get_shard_range(len(lines), nshard, rank)
lines = lines[start:end]
def iterate():
for line in lines:
subpath, nsample = line.split("\t")
yield f"{root}/{subpath}", int(nsample)
return iterate, len(lines)
def dump_feature(reader, generator, num, split, nshard, rank, feat_dir):
iterator = generator()
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
os.makedirs(feat_dir, exist_ok=True)
if os.path.exists(feat_path):
os.remove(feat_path)
feat_f = NpyAppendArray(feat_path)
with open(leng_path, "w") as leng_f:
for path, nsample in tqdm.tqdm(iterator, total=num):
feat = reader.get_feats(path, nsample)
feat_f.append(feat.cpu().numpy())
leng_f.write(f"{len(feat)}\n")
logger.info("finished successfully")
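# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.
# get_shard_range() splits `tot` items into `nshard` contiguous, nearly equal
# ranges so feature dumping can be spread over independent jobs (one rank per
# job).
if __name__ == "__main__":
    # e.g. 10 items over 3 shards -> (0, 3), (3, 7), (7, 10)
    print([get_shard_range(10, 3, r) for r in range(3)])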
| 2,008 | 28.985075 | 77 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/simple_kmeans/dump_w2v2_feature.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import fairseq
import soundfile as sf
import torch
import torch.nn.functional as F
from feature_utils import get_path_iterator, dump_feature
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_w2v2_feature")
class Wav2Vec2FeatureReader(object):
def __init__(self, ckpt_path, layer, max_chunk=1600000):
(
model,
cfg,
task,
) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
self.model = model[0].eval().cuda()
self.task = task
self.layer = layer # assume this is 1-based like HuBERT
self.max_chunk = max_chunk
logger.info(f"TASK CONFIG:\n{self.task.cfg}")
logger.info(f" max_chunk = {self.max_chunk}")
logger.info(f" model:\n{self.model}")
def read_audio(self, path, ref_len=None):
wav, sr = sf.read(path)
assert sr == self.task.cfg.sample_rate, sr
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, path, ref_len=None):
x = self.read_audio(path, ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float().cuda()
if self.task.cfg.normalize:
x = F.layer_norm(x, x.shape)
x = x.view(1, -1)
feat = []
for start in range(0, x.size(1), self.max_chunk):
x_chunk = x[:, start: start + self.max_chunk]
res = self.model.extract_features(
source=x_chunk,
padding_mask=None,
mask=False,
layer=self.layer - 1,
)
feat_chunk = res["x"]
feat.append(feat_chunk)
return torch.cat(feat, 1).squeeze(0)
def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk):
reader = Wav2Vec2FeatureReader(ckpt_path, layer, max_chunk)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("ckpt_path")
parser.add_argument("layer", type=int)
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--max_chunk", type=int, default=1600000)
args = parser.parse_args()
logger.info(args)
main(**vars(args))
| 3,129 | 31.604167 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/simple_kmeans/learn_kmeans.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
from sklearn.cluster import MiniBatchKMeans
import joblib
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("learn_kmeans")
def get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
):
return MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
verbose=1,
compute_labels=False,
tol=tol,
max_no_improvement=max_no_improvement,
init_size=None,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
)
def load_feature_shard(feat_dir, split, nshard, rank, percent):
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
with open(leng_path, "r") as f:
lengs = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengs[:-1]).tolist()
if percent < 0:
return np.load(feat_path, mmap_mode="r")
else:
nsample = int(np.ceil(len(lengs) * percent))
indices = np.random.choice(len(lengs), nsample, replace=False)
feat = np.load(feat_path, mmap_mode="r")
sampled_feat = np.concatenate(
[feat[offsets[i]: offsets[i] + lengs[i]] for i in indices], axis=0
)
logger.info(
(
f"sampled {nsample} utterances, {len(sampled_feat)} frames "
f"from shard {rank}/{nshard}"
)
)
return sampled_feat
def load_feature(feat_dir, split, nshard, seed, percent):
assert percent <= 1.0
feat = np.concatenate(
[
load_feature_shard(feat_dir, split, nshard, r, percent)
for r in range(nshard)
],
axis=0,
)
logging.info(f"loaded feature with dimension {feat.shape}")
return feat
def learn_kmeans(
feat_dir,
split,
nshard,
km_path,
n_clusters,
seed,
percent,
init,
max_iter,
batch_size,
tol,
n_init,
reassignment_ratio,
max_no_improvement,
):
np.random.seed(seed)
feat = load_feature(feat_dir, split, nshard, seed, percent)
km_model = get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
)
km_model.fit(feat)
joblib.dump(km_model, km_path)
inertia = -km_model.score(feat) / len(feat)
logger.info("total intertia: %.5f", inertia)
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("feat_dir", type=str)
parser.add_argument("split", type=str)
parser.add_argument("nshard", type=int)
parser.add_argument("km_path", type=str)
parser.add_argument("n_clusters", type=int)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--percent", default=-1, type=float, help="sample a subset; -1 for all"
)
parser.add_argument("--init", default="k-means++")
parser.add_argument("--max_iter", default=100, type=int)
parser.add_argument("--batch_size", default=10000, type=int)
parser.add_argument("--tol", default=0.0, type=float)
parser.add_argument("--max_no_improvement", default=100, type=int)
parser.add_argument("--n_init", default=20, type=int)
parser.add_argument("--reassignment_ratio", default=0.0, type=float)
args = parser.parse_args()
logging.info(str(args))
learn_kmeans(**vars(args))
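# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.  Typical
# invocation, with hypothetical paths, fitting e.g. a 500-cluster
# MiniBatchKMeans model on 10% of the features dumped across 100 shards:
#
#     python learn_kmeans.py /path/to/feat_dir train 100 /path/to/km_model.bin 500 --percent 0.1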
| 4,000 | 26.217687 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/hubert/simple_kmeans/dump_hubert_feature.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import fairseq
import soundfile as sf
import torch
import torch.nn.functional as F
from feature_utils import get_path_iterator, dump_feature
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_hubert_feature")
class HubertFeatureReader(object):
def __init__(self, ckpt_path, layer, max_chunk=1600000):
(
model,
cfg,
task,
) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
self.model = model[0].eval().cuda()
self.task = task
self.layer = layer
self.max_chunk = max_chunk
logger.info(f"TASK CONFIG:\n{self.task.cfg}")
logger.info(f" max_chunk = {self.max_chunk}")
def read_audio(self, path, ref_len=None):
wav, sr = sf.read(path)
assert sr == self.task.cfg.sample_rate, sr
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, path, ref_len=None):
x = self.read_audio(path, ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float().cuda()
if self.task.cfg.normalize:
x = F.layer_norm(x, x.shape)
x = x.view(1, -1)
feat = []
for start in range(0, x.size(1), self.max_chunk):
x_chunk = x[:, start: start + self.max_chunk]
feat_chunk, _ = self.model.extract_features(
source=x_chunk,
padding_mask=None,
mask=False,
output_layer=self.layer,
)
feat.append(feat_chunk)
return torch.cat(feat, 1).squeeze(0)
def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk):
reader = HubertFeatureReader(ckpt_path, layer, max_chunk)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("ckpt_path")
parser.add_argument("layer", type=int)
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--max_chunk", type=int, default=1600000)
args = parser.parse_args()
logger.info(args)
main(**vars(args))
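# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.  Features
# are read from {tsv_dir}/{split}.tsv and written to
# {feat_dir}/{split}_{rank}_{nshard}.npy plus a matching .len file (see
# feature_utils.dump_feature).  Typical invocation, with hypothetical paths,
# extracting e.g. layer-6 features over 100 shards:
#
#     python dump_hubert_feature.py /path/to/tsv_dir train /path/to/hubert.pt 6 100 0 /path/to/feat_dir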
| 3,018 | 31.117021 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/speech_to_text/prep_covost_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class CoVoST(Dataset):
"""Create a Dataset for CoVoST (https://github.com/facebookresearch/covost).
Args:
root (str): root path to the dataset and generated manifests/features
source_language (str): source (audio) language
target_language (str, optional): target (text) language,
None for no translation (default: None)
version (int, optional): CoVoST version. (default: 2)
download (bool, optional): Whether to download the dataset if it is not
found at root path. (default: ``False``).
"""
COVOST_URL_TEMPLATE = (
"https://dl.fbaipublicfiles.com/covost/"
"covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
)
VERSIONS = {2}
SPLITS = ["train", "dev", "test"]
XX_EN_LANGUAGES = {
1: ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn", "zh-CN"],
2: [
"fr",
"de",
"es",
"ca",
"it",
"ru",
"zh-CN",
"pt",
"fa",
"et",
"mn",
"nl",
"tr",
"ar",
"sv-SE",
"lv",
"sl",
"ta",
"ja",
"id",
"cy",
],
}
EN_XX_LANGUAGES = {
1: [],
2: [
"de",
"tr",
"fa",
"sv-SE",
"mn",
"zh-CN",
"cy",
"ca",
"sl",
"et",
"id",
"ar",
"ta",
"lv",
"ja",
],
}
def __init__(
self,
root: str,
split: str,
source_language: str,
target_language: Optional[str] = None,
version: int = 2,
) -> None:
assert version in self.VERSIONS and split in self.SPLITS
assert source_language is not None
self.no_translation = target_language is None
if not self.no_translation:
assert "en" in {source_language, target_language}
if source_language == "en":
assert target_language in self.EN_XX_LANGUAGES[version]
else:
assert source_language in self.XX_EN_LANGUAGES[version]
else:
# Hack here so that we can get "split" column from CoVoST TSV.
# Note that we use CoVoST train split for ASR which is an extension
# to Common Voice train split.
target_language = "de" if source_language == "en" else "en"
self.root: Path = Path(root)
cv_tsv_path = self.root / "validated.tsv"
assert cv_tsv_path.is_file()
covost_url = self.COVOST_URL_TEMPLATE.format(
src_lang=source_language, tgt_lang=target_language
)
covost_archive = self.root / Path(covost_url).name
if not covost_archive.is_file():
download_url(covost_url, self.root.as_posix(), hash_value=None)
extract_archive(covost_archive.as_posix())
cv_tsv = load_df_from_tsv(cv_tsv_path)
covost_tsv = load_df_from_tsv(
self.root / Path(covost_url).name.replace(".tar.gz", "")
)
df = pd.merge(
left=cv_tsv[["path", "sentence", "client_id"]],
right=covost_tsv[["path", "translation", "split"]],
how="inner",
on="path",
)
if split == "train":
df = df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
else:
df = df[df["split"] == split]
data = df.to_dict(orient="index").items()
data = [v for k, v in sorted(data, key=lambda x: x[0])]
self.data = []
for e in data:
try:
path = self.root / "clips" / e["path"]
_ = torchaudio.info(path.as_posix())
self.data.append(e)
except RuntimeError:
pass
def __getitem__(
self, n: int
    ) -> Tuple[Tensor, int, str, Optional[str], str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, sentence, translation, speaker_id,
sample_id)``
"""
data = self.data[n]
path = self.root / "clips" / data["path"]
waveform, sample_rate = torchaudio.load(path)
sentence = data["sentence"]
translation = None if self.no_translation else data["translation"]
speaker_id = data["client_id"]
_id = data["path"].replace(".mp3", "")
return waveform, sample_rate, sentence, translation, speaker_id, _id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute() / args.src_lang
if not root.is_dir():
raise NotADirectoryError(f"{root} does not exist")
# Extract features
feature_root = root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in CoVoST.SPLITS:
print(f"Fetching split {split}...")
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
print("Extracting log mel filter bank features...")
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
extract_fbank_features(
waveform, sample_rate, feature_root / f"{utt_id}.npy"
)
# Pack features into ZIP
zip_path = root / "fbank80.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
task = f"asr_{args.src_lang}"
if args.tgt_lang is not None:
task = f"st_{args.src_lang}_{args.tgt_lang}"
for split in CoVoST.SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(audio_paths[utt_id])
manifest["n_frames"].append(audio_lengths[utt_id])
manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt)
manifest["speaker"].append(speaker_id)
is_train_split = split.startswith("train")
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, root / f"{split}_{task}.tsv")
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
root / spm_filename_prefix,
args.vocab_type,
args.vocab_size
)
# Generate config YAML
gen_config_yaml(
root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{task}.yaml",
specaugment_policy="lb",
)
# Clean up
shutil.rmtree(feature_root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-root", "-d", required=True, type=str,
help="data root with sub-folders for each language <root>/<src_lang>"
)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
),
parser.add_argument("--vocab-size", default=1000, type=int)
parser.add_argument("--src-lang", "-s", required=True, type=str)
parser.add_argument("--tgt-lang", "-t", type=str)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
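# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the original fairseq file.  The
# script expects the Common Voice release for the source language under
# <data-root>/<src-lang> (including validated.tsv and clips/), downloads the
# matching CoVoST TSV, and writes fbank features, per-split manifests, a
# SentencePiece vocab and a config YAML.  Example invocation with hypothetical
# paths:
#
#     python prep_covost_data.py --data-root /path/to/covost -s fr -t en --vocab-type char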
| 8,909 | 30.821429 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/speech_to_text/prep_mtedx_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = [
"id", "audio", "n_frames", "tgt_text", "speaker", "tgt_lang"
]
class mTEDx(Dataset):
"""
Create a Dataset for Multilingual TEDx.
Each item is a tuple of the form: waveform, sample_rate, source utterance,
target utterance, speaker_id, utterance_id
"""
SPLITS = ["train", "valid", "test"]
LANGPAIRS = ["es-es", "fr-fr", "pt-pt", "it-it", "ru-ru", "el-el", "ar-ar",
"de-de", "es-en", "es-fr", "es-pt", "es-it", "fr-en", "fr-es",
"fr-pt", "pt-en", "pt-es", "it-en", "it-es", "ru-en", "el-en"]
def __init__(self, root: str, lang: str, split: str) -> None:
assert split in self.SPLITS and lang in self.LANGPAIRS
_root = Path(root) / f"{lang}" / "data" / split
wav_root, txt_root = _root / "wav", _root / "txt"
assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
# Load audio segments
try:
import yaml
except ImportError:
print(
"Please install PyYAML to load the Multilingual TEDx YAML files"
)
with open(txt_root / f"{split}.yaml") as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
# Load source and target utterances
src, tgt = lang.split("-")
for _lang in [src, tgt]:
with open(txt_root / f"{split}.{_lang}") as f:
utterances = [r.strip() for r in f]
assert len(segments) == len(utterances)
for i, u in enumerate(utterances):
segments[i][_lang] = u
# Gather info
self.data = []
for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
wav_filename = wav_filename.replace(".wav", ".flac")
wav_path = wav_root / wav_filename
sample_rate = sf.info(wav_path.as_posix()).samplerate
seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
for i, segment in enumerate(seg_group):
offset = int(float(segment["offset"]) * sample_rate)
n_frames = int(float(segment["duration"]) * sample_rate)
_id = f"{wav_path.stem}_{i}"
self.data.append(
(
wav_path.as_posix(),
offset,
n_frames,
sample_rate,
segment[src],
segment[tgt],
segment["speaker_id"],
tgt,
_id,
)
)
def __getitem__(
self, n: int
) -> Tuple[torch.Tensor, int, str, str, str, str, str]:
wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, tgt_lang, \
utt_id = self.data[n]
waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute()
for lang in mTEDx.LANGPAIRS:
cur_root = root / f"{lang}"
if not cur_root.is_dir():
print(f"{cur_root.as_posix()} does not exist. Skipped.")
continue
# Extract features
audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80")
audio_root.mkdir(exist_ok=True)
for split in mTEDx.SPLITS:
print(f"Fetching split {split}...")
dataset = mTEDx(root.as_posix(), lang, split)
if args.use_audio_input:
print("Converting audios...")
                for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
tgt_sample_rate = 16_000
_wavform, _ = convert_waveform(
waveform, sample_rate, to_mono=True,
to_sample_rate=tgt_sample_rate
)
sf.write(
(audio_root / f"{utt_id}.flac").as_posix(),
_wavform.numpy(), tgt_sample_rate
)
else:
print("Extracting log mel filter bank features...")
for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
extract_fbank_features(
waveform, sample_rate, audio_root / f"{utt_id}.npy"
)
# Pack features into ZIP
zip_path = cur_root / f"{audio_root.name}.zip"
print("ZIPing audios/features...")
create_zip(audio_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in mTEDx.SPLITS:
is_train_split = split.startswith("train")
manifest = {c: [] for c in MANIFEST_COLUMNS}
ds = mTEDx(args.data_root, lang, split)
for _, _, src_utt, tgt_utt, spk_id, tgt_lang, utt_id in tqdm(ds):
manifest["id"].append(utt_id)
manifest["audio"].append(audio_paths[utt_id])
manifest["n_frames"].append(audio_lengths[utt_id])
manifest["tgt_text"].append(
src_utt if args.task == "asr" else tgt_utt
)
manifest["speaker"].append(spk_id)
manifest["tgt_lang"].append(tgt_lang)
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
if args.use_audio_input:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy=None,
extra={"use_audio_input": True}
)
else:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="lb",
)
# Clean up
shutil.rmtree(audio_root)
def process_joint(args):
cur_root = Path(args.data_root)
assert all((cur_root / f"{lang}").is_dir() for lang in mTEDx.LANGPAIRS), \
"do not have downloaded data available for all languages"
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for lang in mTEDx.LANGPAIRS:
tsv_path = cur_root / f"{lang}" / f"train_{args.task}.tsv"
df = load_df_from_tsv(tsv_path)
for t in df["tgt_text"]:
f.write(t + "\n")
special_symbols = None
if args.joint:
# Add tgt_lang tags to dict
special_symbols = list(
{f'<lang:{lang.split("-")[1]}>' for lang in mTEDx.LANGPAIRS}
)
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
special_symbols=special_symbols
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="ld",
prepend_tgt_lang_tag=(args.joint),
)
# Make symbolic links to manifests
for lang in mTEDx.LANGPAIRS:
for split in mTEDx.SPLITS:
src_path = cur_root / f"{lang}" / f"{split}_{args.task}.tsv"
desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
if not desc_path.is_symlink():
os.symlink(src_path, desc_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
),
parser.add_argument("--vocab-size", default=8000, type=int)
parser.add_argument("--task", type=str, choices=["asr", "st"])
parser.add_argument("--joint", action="store_true", help="")
parser.add_argument("--use-audio-input", action="store_true")
args = parser.parse_args()
if args.joint:
process_joint(args)
else:
process(args)
if __name__ == "__main__":
main()
| 10,168 | 36.386029 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/speech_to_text/prep_librispeech_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from torchaudio.datasets import LIBRISPEECH
from tqdm import tqdm
log = logging.getLogger(__name__)
SPLITS = [
"train-clean-100",
"train-clean-360",
"train-other-500",
"dev-clean",
"dev-other",
"test-clean",
"test-other",
]
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
def process(args):
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
# Extract features
feature_root = out_root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in SPLITS:
print(f"Fetching split {split}...")
dataset = LIBRISPEECH(out_root.as_posix(), url=split, download=True)
print("Extracting log mel filter bank features...")
for wav, sample_rate, _, spk_id, chapter_no, utt_no in tqdm(dataset):
sample_id = f"{spk_id}-{chapter_no}-{utt_no}"
extract_fbank_features(
wav, sample_rate, feature_root / f"{sample_id}.npy"
)
# Pack features into ZIP
zip_path = out_root / "fbank80.zip"
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = LIBRISPEECH(out_root.as_posix(), url=split)
for _, _, utt, spk_id, chapter_no, utt_no in tqdm(dataset):
sample_id = f"{spk_id}-{chapter_no}-{utt_no}"
manifest["id"].append(sample_id)
manifest["audio"].append(audio_paths[sample_id])
manifest["n_frames"].append(audio_lengths[sample_id])
manifest["tgt_text"].append(utt.lower())
manifest["speaker"].append(spk_id)
save_df_to_tsv(
pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv"
)
if split.startswith("train"):
train_text.extend(manifest["tgt_text"])
# Generate vocab
vocab_size = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
out_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
gen_config_yaml(
out_root,
spm_filename=spm_filename_prefix + ".model",
specaugment_policy="ld"
)
# Clean up
shutil.rmtree(feature_root)
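# Example invocation (output path is illustrative):
#   python examples/speech_to_text/prep_librispeech_data.py \
#     --output-root ${LS_ROOT} --vocab-type unigram --vocab-size 10000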
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-root", "-o", required=True, type=str)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
    )
parser.add_argument("--vocab-size", default=10000, type=int)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 3,623 | 29.2 | 77 |
py
|
sign-topic
|
sign-topic-main/examples/speech_to_text/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
from pathlib import Path
import zipfile
from functools import reduce
from multiprocessing import cpu_count
from typing import Any, Dict, List, Optional, Union
import io
import numpy as np
import pandas as pd
import sentencepiece as sp
from fairseq.data.audio.audio_utils import (
convert_waveform, _get_kaldi_fbank, _get_torchaudio_fbank, is_npy_data,
is_sf_audio_data
)
import torch
import soundfile as sf
from tqdm import tqdm
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n")
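# Usage sketch (illustrative paths): train a unigram SentencePiece model on a
# plain-text dump and emit a matching fairseq dictionary:
#   gen_vocab(Path("train_text.txt"), Path("spm_unigram8000"), "unigram", 8000)
# This writes spm_unigram8000.model/.vocab plus spm_unigram8000.txt (the dict).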
def extract_fbank_features(
waveform: torch.FloatTensor,
sample_rate: int,
output_path: Optional[Path] = None,
n_mel_bins: int = 80,
overwrite: bool = False,
):
if output_path is not None and output_path.is_file() and not overwrite:
return
_waveform, _ = convert_waveform(waveform, sample_rate, to_mono=True)
# Kaldi compliance: 16-bit signed integers
_waveform = _waveform * (2 ** 15)
_waveform = _waveform[0].numpy()
features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins)
if features is None:
features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins)
if features is None:
raise ImportError(
"Please install pyKaldi or torchaudio to enable fbank feature extraction"
)
if output_path is not None:
np.save(output_path.as_posix(), features)
return features
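# The returned features have shape (num_frames, n_mel_bins); when output_path is
# given they are also saved there as a NumPy .npy file.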
def create_zip(data_root: Path, zip_path: Path):
paths = list(data_root.glob("*.npy"))
paths.extend(data_root.glob("*.flac"))
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as f:
for path in tqdm(paths):
f.write(path, arcname=path.name)
def get_zip_manifest(
zip_path: Path, zip_root: Optional[Path] = None, is_audio=False
):
_zip_path = Path.joinpath(zip_root or Path(""), zip_path)
with zipfile.ZipFile(_zip_path, mode="r") as f:
info = f.infolist()
paths, lengths = {}, {}
for i in tqdm(info):
utt_id = Path(i.filename).stem
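        # A ZIP local file header is 30 fixed bytes plus the filename (and any
        # extra field, assumed absent here, as for archives written by
        # create_zip above), so this is the byte offset of the stored payload.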
offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size
paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}"
with open(_zip_path, "rb") as f:
f.seek(offset)
byte_data = f.read(file_size)
assert len(byte_data) > 1
if is_audio:
assert is_sf_audio_data(byte_data), i
else:
assert is_npy_data(byte_data), i
byte_data_fp = io.BytesIO(byte_data)
if is_audio:
lengths[utt_id] = sf.info(byte_data_fp).frames
else:
lengths[utt_id] = np.load(byte_data_fp).shape[0]
return paths, lengths
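# The returned paths map utt_id -> "<zip>:<byte_offset>:<byte_size>", the slice
# notation fairseq's audio datasets use to read directly from the archive;
# lengths hold the number of feature frames (.npy) or audio samples (audio files).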
def gen_config_yaml(
manifest_root: Path,
spm_filename: Optional[str] = None,
vocab_name: Optional[str] = None,
yaml_filename: str = "config.yaml",
specaugment_policy: Optional[str] = "lb",
prepend_tgt_lang_tag: bool = False,
sampling_alpha: Optional[float] = None,
input_channels: Optional[int] = 1,
input_feat_per_channel: Optional[int] = 80,
audio_root: str = "",
cmvn_type: str = "utterance",
gcmvn_path: Optional[Path] = None,
extra=None
):
manifest_root = manifest_root.absolute()
writer = S2TDataConfigWriter(manifest_root / yaml_filename)
assert spm_filename is not None or vocab_name is not None
vocab_name = spm_filename.replace(".model", ".txt") if vocab_name is None \
else vocab_name
writer.set_vocab_filename(vocab_name)
if input_channels is not None:
writer.set_input_channels(input_channels)
if input_feat_per_channel is not None:
writer.set_input_feat_per_channel(input_feat_per_channel)
specaugment_setters = {
"lb": writer.set_specaugment_lb_policy,
"ld": writer.set_specaugment_ld_policy,
"sm": writer.set_specaugment_sm_policy,
"ss": writer.set_specaugment_ss_policy,
}
specaugment_setter = specaugment_setters.get(specaugment_policy, None)
if specaugment_setter is not None:
specaugment_setter()
if spm_filename is not None:
writer.set_bpe_tokenizer(
{
"bpe": "sentencepiece",
"sentencepiece_model": (manifest_root / spm_filename).as_posix(),
}
)
if prepend_tgt_lang_tag:
writer.set_prepend_tgt_lang_tag(True)
if sampling_alpha is not None:
writer.set_sampling_alpha(sampling_alpha)
if cmvn_type not in ["global", "utterance"]:
raise NotImplementedError
if specaugment_policy is not None:
writer.set_feature_transforms(
"_train", [f"{cmvn_type}_cmvn", "specaugment"]
)
writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"])
if cmvn_type == "global":
if gcmvn_path is None:
raise ValueError("Please provide path of global cmvn file.")
else:
writer.set_global_cmvn(gcmvn_path.as_posix())
if len(audio_root) > 0:
writer.set_audio_root(audio_root)
if extra is not None:
writer.set_extra(extra)
writer.flush()
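# Usage sketch (illustrative): write config_st.yaml next to the manifests with
# utterance-level CMVN and the "lb" SpecAugment policy:
#   gen_config_yaml(Path("/data/en-de"), spm_filename="spm_unigram8000_st.model",
#                   yaml_filename="config_st.yaml", specaugment_policy="lb")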
def load_df_from_tsv(path: Union[str, Path]) -> pd.DataFrame:
_path = path if isinstance(path, str) else path.as_posix()
return pd.read_csv(
_path,
sep="\t",
header=0,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
na_filter=False,
)
def save_df_to_tsv(dataframe, path: Union[str, Path]):
_path = path if isinstance(path, str) else path.as_posix()
dataframe.to_csv(
_path,
sep="\t",
header=True,
index=False,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
)
def load_tsv_to_dicts(path: Union[str, Path]) -> List[dict]:
with open(path, "r") as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
rows = [dict(e) for e in reader]
return rows
def filter_manifest_df(
df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000
):
filters = {
"no speech": df["audio"] == "",
f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames,
"empty sentence": df["tgt_text"] == "",
}
if is_train_split:
filters[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames
if extra_filters is not None:
filters.update(extra_filters)
invalid = reduce(lambda x, y: x | y, filters.values())
valid = ~invalid
print(
"| "
+ ", ".join(f"{n}: {f.sum()}" for n, f in filters.items())
+ f", total {invalid.sum()} filtered, {valid.sum()} remained."
)
return df[valid]
def cal_gcmvn_stats(features_list):
features = np.concatenate(features_list)
square_sums = (features ** 2).sum(axis=0)
mean = features.mean(axis=0)
features = np.subtract(features, mean)
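    # variance via E[x^2] - E[x]^2 over the sampled features, floored at 1e-8
    # below before taking the square root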
var = square_sums / features.shape[0] - mean ** 2
std = np.sqrt(np.maximum(var, 1e-8))
return {"mean": mean.astype("float32"), "std": std.astype("float32")}
class S2TDataConfigWriter(object):
DEFAULT_VOCAB_FILENAME = "dict.txt"
DEFAULT_INPUT_FEAT_PER_CHANNEL = 80
DEFAULT_INPUT_CHANNELS = 1
def __init__(self, yaml_path: Path):
try:
import yaml
except ImportError:
print("Please install PyYAML for S2T data config YAML files")
self.yaml = yaml
self.yaml_path = yaml_path
self.config = {}
def flush(self):
with open(self.yaml_path, "w") as f:
self.yaml.dump(self.config, f)
def set_audio_root(self, audio_root=""):
self.config["audio_root"] = audio_root
def set_vocab_filename(self, vocab_filename: str = "dict.txt"):
self.config["vocab_filename"] = vocab_filename
def set_specaugment(
self,
time_wrap_w: int,
freq_mask_n: int,
freq_mask_f: int,
time_mask_n: int,
time_mask_t: int,
time_mask_p: float,
):
self.config["specaugment"] = {
"time_wrap_W": time_wrap_w,
"freq_mask_N": freq_mask_n,
"freq_mask_F": freq_mask_f,
"time_mask_N": time_mask_n,
"time_mask_T": time_mask_t,
"time_mask_p": time_mask_p,
}
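    # The lb/ld/sm/ss presets below correspond to the LibriSpeech basic/double and
    # Switchboard mild/strong policies from the SpecAugment paper (Park et al., 2019).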
def set_specaugment_lb_policy(self):
self.set_specaugment(
time_wrap_w=0,
freq_mask_n=1,
freq_mask_f=27,
time_mask_n=1,
time_mask_t=100,
time_mask_p=1.0,
)
def set_specaugment_ld_policy(self):
self.set_specaugment(
time_wrap_w=0,
freq_mask_n=2,
freq_mask_f=27,
time_mask_n=2,
time_mask_t=100,
time_mask_p=1.0,
)
def set_specaugment_sm_policy(self):
self.set_specaugment(
time_wrap_w=0,
freq_mask_n=2,
freq_mask_f=15,
time_mask_n=2,
time_mask_t=70,
time_mask_p=0.2,
)
def set_specaugment_ss_policy(self):
self.set_specaugment(
time_wrap_w=0,
freq_mask_n=2,
freq_mask_f=27,
time_mask_n=2,
time_mask_t=70,
time_mask_p=0.2,
)
def set_input_channels(self, input_channels: int = 1):
self.config["input_channels"] = input_channels
def set_input_feat_per_channel(self, input_feat_per_channel: int = 80):
self.config["input_feat_per_channel"] = input_feat_per_channel
def set_bpe_tokenizer(self, bpe_tokenizer: Dict[str, Any]):
self.config["bpe_tokenizer"] = bpe_tokenizer
def set_global_cmvn(self, stats_npz_path: str):
self.config["global_cmvn"] = {"stats_npz_path": stats_npz_path}
def set_feature_transforms(self, split: str, transforms: List[str]):
if "transforms" not in self.config:
self.config["transforms"] = {}
self.config["transforms"][split] = transforms
def set_prepend_tgt_lang_tag(self, flag: bool = True):
self.config["prepend_tgt_lang_tag"] = flag
def set_sampling_alpha(self, sampling_alpha: float = 1.0):
self.config["sampling_alpha"] = sampling_alpha
def set_extra(self, data):
self.config.update(data)
| 12,275 | 30.96875 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/speech_to_text/prep_mustc_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import numpy as np
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
cal_gcmvn_stats,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class MUSTC(Dataset):
"""
Create a Dataset for MuST-C. Each item is a tuple of the form:
waveform, sample_rate, source utterance, target utterance, speaker_id,
utterance_id
"""
SPLITS = ["train", "dev", "tst-COMMON", "tst-HE"]
LANGUAGES = ["de", "es", "fr", "it", "nl", "pt", "ro", "ru"]
def __init__(self, root: str, lang: str, split: str) -> None:
assert split in self.SPLITS and lang in self.LANGUAGES
_root = Path(root) / f"en-{lang}" / "data" / split
wav_root, txt_root = _root / "wav", _root / "txt"
assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
# Load audio segments
try:
import yaml
except ImportError:
print("Please install PyYAML to load the MuST-C YAML files")
with open(txt_root / f"{split}.yaml") as f:
segments = yaml.load(f, Loader=yaml.BaseLoader)
# Load source and target utterances
for _lang in ["en", lang]:
with open(txt_root / f"{split}.{_lang}") as f:
utterances = [r.strip() for r in f]
assert len(segments) == len(utterances)
for i, u in enumerate(utterances):
segments[i][_lang] = u
# Gather info
self.data = []
for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
wav_path = wav_root / wav_filename
sample_rate = sf.info(wav_path.as_posix()).samplerate
seg_group = sorted(_seg_group, key=lambda x: x["offset"])
for i, segment in enumerate(seg_group):
offset = int(float(segment["offset"]) * sample_rate)
n_frames = int(float(segment["duration"]) * sample_rate)
_id = f"{wav_path.stem}_{i}"
self.data.append(
(
wav_path.as_posix(),
offset,
n_frames,
sample_rate,
segment["en"],
segment[lang],
segment["speaker_id"],
_id,
)
)
def __getitem__(
self, n: int
) -> Tuple[torch.Tensor, int, str, str, str, str]:
wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, \
utt_id = self.data[n]
waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
waveform = torch.from_numpy(waveform)
return waveform, sr, src_utt, tgt_utt, spk_id, utt_id
def __len__(self) -> int:
return len(self.data)
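# Example (illustrative root path):
#   dataset = MUSTC("/data/MUSTC_v1.0", "de", "dev")
#   waveform, sample_rate, src_utt, tgt_utt, spk_id, utt_id = dataset[0]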
def process(args):
root = Path(args.data_root).absolute()
for lang in MUSTC.LANGUAGES:
cur_root = root / f"en-{lang}"
if not cur_root.is_dir():
print(f"{cur_root.as_posix()} does not exist. Skipped.")
continue
# Extract features
audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80")
audio_root.mkdir(exist_ok=True)
for split in MUSTC.SPLITS:
print(f"Fetching split {split}...")
dataset = MUSTC(root.as_posix(), lang, split)
if args.use_audio_input:
print("Converting audios...")
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
tgt_sample_rate = 16_000
_wavform, _ = convert_waveform(
waveform, sample_rate, to_mono=True,
to_sample_rate=tgt_sample_rate
)
sf.write(
(audio_root / f"{utt_id}.flac").as_posix(),
_wavform.T.numpy(), tgt_sample_rate
)
else:
print("Extracting log mel filter bank features...")
gcmvn_feature_list = []
if split == 'train' and args.cmvn_type == "global":
print("And estimating cepstral mean and variance stats...")
for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
features = extract_fbank_features(
waveform, sample_rate, audio_root / f"{utt_id}.npy"
)
if split == 'train' and args.cmvn_type == "global":
if len(gcmvn_feature_list) < args.gcmvn_max_num:
gcmvn_feature_list.append(features)
if split == 'train' and args.cmvn_type == "global":
                    # Estimate and save global CMVN stats
stats = cal_gcmvn_stats(gcmvn_feature_list)
with open(cur_root / "gcmvn.npz", "wb") as f:
np.savez(f, mean=stats["mean"], std=stats["std"])
# Pack features into ZIP
zip_path = cur_root / f"{audio_root.name}.zip"
print("ZIPing audios/features...")
create_zip(audio_root, zip_path)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(
zip_path,
is_audio=args.use_audio_input,
)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
for split in MUSTC.SPLITS:
is_train_split = split.startswith("train")
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = MUSTC(args.data_root, lang, split)
for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(audio_paths[utt_id])
manifest["n_frames"].append(audio_lengths[utt_id])
manifest["tgt_text"].append(
src_utt if args.task == "asr" else tgt_utt
)
manifest["speaker"].append(speaker_id)
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
# Generate config YAML
if args.use_audio_input:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy=None,
extra={"use_audio_input": True}
)
else:
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="lb",
cmvn_type=args.cmvn_type,
gcmvn_path=(
cur_root / "gcmvn.npz" if args.cmvn_type == "global"
else None
),
)
# Clean up
shutil.rmtree(audio_root)
def process_joint(args):
cur_root = Path(args.data_root)
assert all(
(cur_root / f"en-{lang}").is_dir() for lang in MUSTC.LANGUAGES
), "do not have downloaded data available for all 8 languages"
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
with NamedTemporaryFile(mode="w") as f:
for lang in MUSTC.LANGUAGES:
tsv_path = cur_root / f"en-{lang}" / f"train_{args.task}.tsv"
df = load_df_from_tsv(tsv_path)
for t in df["tgt_text"]:
f.write(t + "\n")
special_symbols = None
if args.task == 'st':
special_symbols = [f'<lang:{lang}>' for lang in MUSTC.LANGUAGES]
gen_vocab(
Path(f.name),
cur_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
special_symbols=special_symbols
)
# Generate config YAML
gen_config_yaml(
cur_root,
spm_filename=spm_filename_prefix + ".model",
yaml_filename=f"config_{args.task}.yaml",
specaugment_policy="ld",
prepend_tgt_lang_tag=(args.task == "st"),
)
# Make symbolic links to manifests
for lang in MUSTC.LANGUAGES:
for split in MUSTC.SPLITS:
src_path = cur_root / f"en-{lang}" / f"{split}_{args.task}.tsv"
desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
if not desc_path.is_symlink():
os.symlink(src_path, desc_path)
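# Example invocation (paths are illustrative):
#   python examples/speech_to_text/prep_mustc_data.py \
#     --data-root ${MUSTC_ROOT} --task st --vocab-type unigram --vocab-size 8000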
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
    )
parser.add_argument("--vocab-size", default=8000, type=int)
parser.add_argument("--task", type=str, choices=["asr", "st"])
parser.add_argument("--joint", action="store_true", help="")
parser.add_argument(
"--cmvn-type", default="utterance",
choices=["global", "utterance"],
help="The type of cepstral mean and variance normalization"
)
parser.add_argument(
"--gcmvn-max-num", default=150000, type=int,
help="Maximum number of sentences to use to estimate global mean and "
"variance"
)
parser.add_argument("--use-audio-input", action="store_true")
args = parser.parse_args()
if args.joint:
process_joint(args)
else:
process(args)
if __name__ == "__main__":
main()
| 11,080 | 36.562712 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/speech_to_text/seg_mustc_data.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import soundfile as sf
from examples.speech_to_text.prep_mustc_data import (
MUSTC
)
from tqdm import tqdm
log = logging.getLogger(__name__)
def main(args):
root = Path(args.data_root).absolute()
lang = args.lang
split = args.split
cur_root = root / f"en-{lang}"
assert cur_root.is_dir(), (
f"{cur_root.as_posix()} does not exist. Skipped."
)
dataset = MUSTC(root.as_posix(), lang, split)
output = Path(args.output).absolute()
output.mkdir(exist_ok=True)
f_text = open(output / f"{split}.{lang}", "w")
f_wav_list = open(output / f"{split}.wav_list", "w")
for waveform, sample_rate, _, text, _, utt_id in tqdm(dataset):
sf.write(
output / f"{utt_id}.wav",
waveform.squeeze(0).numpy(),
samplerate=int(sample_rate)
)
f_text.write(text + "\n")
f_wav_list.write(str(output / f"{utt_id}.wav") + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument("--task", required=True, type=str, choices=["asr", "st"])
parser.add_argument("--lang", required=True, type=str)
parser.add_argument("--output", required=True, type=str)
parser.add_argument("--split", required=True, choices=MUSTC.SPLITS)
args = parser.parse_args()
main(args)
| 1,645 | 28.927273 | 81 |
py
|
sign-topic
|
sign-topic-main/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py
|
import math
import os
import json
import numpy as np
import torch
import torchaudio.compliance.kaldi as kaldi
import yaml
from fairseq import checkpoint_utils, tasks
from fairseq.file_io import PathManager
try:
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from simuleval.agents import SpeechAgent
from simuleval.states import ListEntry, SpeechStates
except ImportError:
print("Please install simuleval 'pip install simuleval'")
SHIFT_SIZE = 10
WINDOW_SIZE = 25
SAMPLE_RATE = 16000
FEATURE_DIM = 80
BOW_PREFIX = "\u2581"
class OnlineFeatureExtractor:
"""
    Extract speech features on the fly.
"""
def __init__(self, args):
self.shift_size = args.shift_size
self.window_size = args.window_size
assert self.window_size >= self.shift_size
self.sample_rate = args.sample_rate
self.feature_dim = args.feature_dim
self.num_samples_per_shift = int(self.shift_size * self.sample_rate / 1000)
self.num_samples_per_window = int(self.window_size * self.sample_rate / 1000)
self.len_ms_to_samples = lambda x: x * self.sample_rate / 1000
self.previous_residual_samples = []
self.global_cmvn = args.global_cmvn
def clear_cache(self):
self.previous_residual_samples = []
def __call__(self, new_samples):
samples = self.previous_residual_samples + new_samples
if len(samples) < self.num_samples_per_window:
self.previous_residual_samples = samples
return
# num_frames is the number of frames from the new segment
num_frames = math.floor(
(len(samples) - self.len_ms_to_samples(self.window_size - self.shift_size))
/ self.num_samples_per_shift
)
        # the number of samples used for feature extraction,
        # including part of the previous segment
effective_num_samples = int(
num_frames * self.len_ms_to_samples(self.shift_size)
+ self.len_ms_to_samples(self.window_size - self.shift_size)
)
input_samples = samples[:effective_num_samples]
self.previous_residual_samples = samples[
num_frames * self.num_samples_per_shift:
]
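        # fixed seed for reproducibility of any stochastic steps (e.g. dithering)
        # in the kaldi-compliant fbank extraction below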
torch.manual_seed(1)
output = kaldi.fbank(
torch.FloatTensor(input_samples).unsqueeze(0),
num_mel_bins=self.feature_dim,
frame_length=self.window_size,
frame_shift=self.shift_size,
).numpy()
output = self.transform(output)
return torch.from_numpy(output)
def transform(self, input):
if self.global_cmvn is None:
return input
mean = self.global_cmvn["mean"]
std = self.global_cmvn["std"]
x = np.subtract(input, mean)
x = np.divide(x, std)
return x
class TensorListEntry(ListEntry):
"""
    Data structure to store a list of tensors.
"""
def append(self, value):
if len(self.value) == 0:
self.value = value
return
self.value = torch.cat([self.value] + [value], dim=0)
def info(self):
return {
"type": str(self.new_value_type),
"length": self.__len__(),
"value": "" if type(self.value) is list else self.value.size(),
}
class FairseqSimulSTAgent(SpeechAgent):
speech_segment_size = 40 # in ms, 4 pooling ratio * 10 ms step size
def __init__(self, args):
super().__init__(args)
self.eos = DEFAULT_EOS
self.gpu = getattr(args, "gpu", False)
self.args = args
self.load_model_vocab(args)
if getattr(
self.model.decoder.layers[0].encoder_attn,
'pre_decision_ratio',
None
) is not None:
self.speech_segment_size *= (
self.model.decoder.layers[0].encoder_attn.pre_decision_ratio
)
args.global_cmvn = None
if args.config:
with open(os.path.join(args.data_bin, args.config), "r") as f:
config = yaml.load(f, Loader=yaml.BaseLoader)
if "global_cmvn" in config:
args.global_cmvn = np.load(config["global_cmvn"]["stats_npz_path"])
if args.global_stats:
with PathManager.open(args.global_stats, "r") as f:
global_cmvn = json.loads(f.read())
self.global_cmvn = {"mean": global_cmvn["mean"], "std": global_cmvn["stddev"]}
self.feature_extractor = OnlineFeatureExtractor(args)
self.max_len = args.max_len
self.force_finish = args.force_finish
torch.set_grad_enabled(False)
def build_states(self, args, client, sentence_id):
        # Initialize states here, e.g., add customized entries to states.
        # This function is called at the beginning of every new sentence.
states = SpeechStates(args, client, sentence_id, self)
self.initialize_states(states)
return states
def to_device(self, tensor):
if self.gpu:
return tensor.cuda()
else:
return tensor.cpu()
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--model-path', type=str, required=True,
help='path to your pretrained model.')
parser.add_argument("--data-bin", type=str, required=True,
help="Path of data binary")
parser.add_argument("--config", type=str, default=None,
help="Path to config yaml file")
parser.add_argument("--global-stats", type=str, default=None,
help="Path to json file containing cmvn stats")
parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece",
help="Subword splitter type for target text")
parser.add_argument("--tgt-splitter-path", type=str, default=None,
help="Subword splitter model path for target text")
parser.add_argument("--user-dir", type=str, default="examples/simultaneous_translation",
help="User directory for simultaneous translation")
parser.add_argument("--max-len", type=int, default=200,
help="Max length of translation")
parser.add_argument("--force-finish", default=False, action="store_true",
help="Force the model to finish the hypothsis if the source is not finished")
parser.add_argument("--shift-size", type=int, default=SHIFT_SIZE,
help="Shift size of feature extraction window.")
parser.add_argument("--window-size", type=int, default=WINDOW_SIZE,
help="Window size of feature extraction window.")
parser.add_argument("--sample-rate", type=int, default=SAMPLE_RATE,
help="Sample rate")
parser.add_argument("--feature-dim", type=int, default=FEATURE_DIM,
help="Acoustic feature dimension.")
# fmt: on
return parser
def load_model_vocab(self, args):
filename = args.model_path
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
task_args = state["cfg"]["task"]
task_args.data = args.data_bin
if args.config is not None:
task_args.config_yaml = args.config
task = tasks.setup_task(task_args)
# build model for ensemble
state["cfg"]["model"].load_pretrained_encoder_from = None
state["cfg"]["model"].load_pretrained_decoder_from = None
self.model = task.build_model(state["cfg"]["model"])
self.model.load_state_dict(state["model"], strict=True)
self.model.eval()
self.model.share_memory()
if self.gpu:
self.model.cuda()
# Set dictionary
self.dict = {}
self.dict["tgt"] = task.target_dictionary
def initialize_states(self, states):
self.feature_extractor.clear_cache()
states.units.source = TensorListEntry()
states.units.target = ListEntry()
states.incremental_states = dict()
def segment_to_units(self, segment, states):
# Convert speech samples to features
features = self.feature_extractor(segment)
if features is not None:
return [features]
else:
return []
def units_to_segment(self, units, states):
        # Merge subwords into full words.
if self.model.decoder.dictionary.eos() == units[0]:
return DEFAULT_EOS
segment = []
if None in units.value:
units.value.remove(None)
for index in units:
if index is None:
units.pop()
token = self.model.decoder.dictionary.string([index])
if token.startswith(BOW_PREFIX):
if len(segment) == 0:
segment += [token.replace(BOW_PREFIX, "")]
else:
for j in range(len(segment)):
units.pop()
string_to_return = ["".join(segment)]
if self.model.decoder.dictionary.eos() == units[0]:
string_to_return += [DEFAULT_EOS]
return string_to_return
else:
segment += [token.replace(BOW_PREFIX, "")]
if (
len(units) > 0
and self.model.decoder.dictionary.eos() == units[-1]
or len(states.units.target) > self.max_len
):
tokens = [self.model.decoder.dictionary.string([unit]) for unit in units]
return ["".join(tokens).replace(BOW_PREFIX, "")] + [DEFAULT_EOS]
return None
def update_model_encoder(self, states):
if len(states.units.source) == 0:
return
src_indices = self.to_device(
states.units.source.value.unsqueeze(0)
)
src_lengths = self.to_device(
torch.LongTensor([states.units.source.value.size(0)])
)
states.encoder_states = self.model.encoder(src_indices, src_lengths)
torch.cuda.empty_cache()
def update_states_read(self, states):
# Happens after a read action.
self.update_model_encoder(states)
def policy(self, states):
if not getattr(states, "encoder_states", None):
return READ_ACTION
tgt_indices = self.to_device(
torch.LongTensor(
[self.model.decoder.dictionary.eos()]
+ [x for x in states.units.target.value if x is not None]
).unsqueeze(0)
)
states.incremental_states["steps"] = {
"src": states.encoder_states["encoder_out"][0].size(0),
"tgt": 1 + len(states.units.target),
}
states.incremental_states["online"] = {"only": torch.tensor(not states.finish_read())}
x, outputs = self.model.decoder.forward(
prev_output_tokens=tgt_indices,
encoder_out=states.encoder_states,
incremental_state=states.incremental_states,
)
states.decoder_out = x
states.decoder_out_extra = outputs
torch.cuda.empty_cache()
if outputs.action == 0:
return READ_ACTION
else:
return WRITE_ACTION
def predict(self, states):
decoder_states = states.decoder_out
lprobs = self.model.get_normalized_probs(
[decoder_states[:, -1:]], log_probs=True
)
index = lprobs.argmax(dim=-1)
index = index[0, 0].item()
if (
self.force_finish
and index == self.model.decoder.dictionary.eos()
and not states.finish_read()
):
            # If we force-finish the translation (i.e., never emit EOS before
            # the source is fully read), return None so no token is written yet
# self.model.decoder.clear_cache(states.incremental_states)
index = None
return index
| 12,193 | 32.5 | 105 |
py
|
sign-topic
|
sign-topic-main/examples/roberta/preprocess_RACE.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import re
class InputExample:
def __init__(self, paragraph, qa_list, label):
self.paragraph = paragraph
self.qa_list = qa_list
self.label = label
def get_examples(data_dir, set_type):
"""
Extract paragraph and question-answer list from each json file
"""
examples = []
levels = ["middle", "high"]
set_type_c = set_type.split("-")
if len(set_type_c) == 2:
levels = [set_type_c[1]]
set_type = set_type_c[0]
for level in levels:
cur_dir = os.path.join(data_dir, set_type, level)
for filename in os.listdir(cur_dir):
cur_path = os.path.join(cur_dir, filename)
with open(cur_path, "r") as f:
cur_data = json.load(f)
answers = cur_data["answers"]
options = cur_data["options"]
questions = cur_data["questions"]
context = cur_data["article"].replace("\n", " ")
context = re.sub(r"\s+", " ", context)
for i in range(len(answers)):
label = ord(answers[i]) - ord("A")
qa_list = []
question = questions[i]
for j in range(4):
option = options[i][j]
if "_" in question:
qa_cat = question.replace("_", option)
else:
qa_cat = " ".join([question, option])
qa_cat = re.sub(r"\s+", " ", qa_cat)
qa_list.append(qa_cat)
examples.append(InputExample(context, qa_list, label))
return examples
def main():
"""
    Helper script to extract paragraphs, questions, and answers from the RACE dataset.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-dir",
help="input directory for downloaded RACE dataset",
)
parser.add_argument(
"--output-dir",
help="output directory for extracted data",
)
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
for set_type in ["train", "dev", "test-middle", "test-high"]:
examples = get_examples(args.input_dir, set_type)
qa_file_paths = [
os.path.join(args.output_dir, set_type + ".input" + str(i + 1))
for i in range(4)
]
qa_files = [open(qa_file_path, "w") for qa_file_path in qa_file_paths]
outf_context_path = os.path.join(args.output_dir, set_type + ".input0")
outf_label_path = os.path.join(args.output_dir, set_type + ".label")
outf_context = open(outf_context_path, "w")
outf_label = open(outf_label_path, "w")
for example in examples:
outf_context.write(example.paragraph + "\n")
for i in range(4):
qa_files[i].write(example.qa_list[i] + "\n")
outf_label.write(str(example.label) + "\n")
for f in qa_files:
f.close()
outf_label.close()
outf_context.close()
if __name__ == "__main__":
main()
| 3,429 | 32.300971 | 81 |
py
|
sign-topic
|
sign-topic-main/examples/roberta/multiprocessing_bpe_encoder.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import sys
from collections import Counter
from multiprocessing import Pool
from fairseq.data.encoders.gpt2_bpe import get_encoder
def main():
"""
Helper script to encode raw text with the GPT-2 BPE using multiple processes.
The encoder.json and vocab.bpe files can be obtained here:
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--encoder-json",
help="path to encoder.json",
)
parser.add_argument(
"--vocab-bpe",
type=str,
help="path to vocab.bpe",
)
parser.add_argument(
"--inputs",
nargs="+",
default=["-"],
help="input files to filter/encode",
)
parser.add_argument(
"--outputs",
nargs="+",
default=["-"],
help="path to save encoded outputs",
)
parser.add_argument(
"--keep-empty",
action="store_true",
help="keep empty lines",
)
parser.add_argument("--workers", type=int, default=20)
args = parser.parse_args()
assert len(args.inputs) == len(
args.outputs
), "number of input and output paths should match"
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-"
else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-"
else sys.stdout
for output in args.outputs
]
encoder = MultiprocessingEncoder(args)
pool = Pool(args.workers, initializer=encoder.initializer)
encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100)
stats = Counter()
for i, (filt, enc_lines) in enumerate(encoded_lines, start=1):
if filt == "PASS":
for enc_line, output_h in zip(enc_lines, outputs):
print(enc_line, file=output_h)
else:
stats["num_filtered_" + filt] += 1
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
for k, v in stats.most_common():
print("[{}] filtered {} lines".format(k, v), file=sys.stderr)
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global bpe
bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe)
def encode(self, line):
global bpe
ids = bpe.encode(line)
return list(map(str, ids))
def decode(self, tokens):
global bpe
return bpe.decode(tokens)
def encode_lines(self, lines):
"""
Encode a set of lines. All lines will be encoded together.
"""
enc_lines = []
for line in lines:
line = line.strip()
if len(line) == 0 and not self.args.keep_empty:
return ["EMPTY", None]
tokens = self.encode(line)
enc_lines.append(" ".join(tokens))
return ["PASS", enc_lines]
def decode_lines(self, lines):
dec_lines = []
for line in lines:
tokens = map(int, line.strip().split())
dec_lines.append(self.decode(tokens))
return ["PASS", dec_lines]
if __name__ == "__main__":
main()
| 3,782 | 27.877863 | 81 |
py
|
sign-topic
|
sign-topic-main/examples/roberta/commonsense_qa/commonsense_qa_task.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
from fairseq.data import (
Dictionary,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
@register_task("commonsense_qa")
class CommonsenseQATask(LegacyFairseqTask):
"""Task to finetune RoBERTa for Commonsense QA."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data", metavar="DIR", help="path to data directory; we load <split>.jsonl"
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
parser.add_argument("--num-classes", type=int, default=5)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol("<mask>")
self.bpe = encoders.build_bpe(args)
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert (
args.criterion == "sentence_ranking"
), "Must set --criterion=sentence_ranking"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def binarize(s, append_bos=False):
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=True,
add_if_not_exist=False,
).long()
if append_bos and self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
src_tokens = [[] for i in range(self.args.num_classes)]
src_lengths = [[] for i in range(self.args.num_classes)]
labels = []
with open(data_path) as h:
for line in h:
example = json.loads(line.strip())
if "answerKey" in example:
label = ord(example["answerKey"]) - ord("A")
labels.append(label)
question = example["question"]["stem"]
assert len(example["question"]["choices"]) == self.args.num_classes
# format: `<s> Q: Where would I not want a fox? </s> A: hen house </s>`
question = "Q: " + question
question_toks = binarize(question, append_bos=True)
for i, choice in enumerate(example["question"]["choices"]):
src = "A: " + choice["text"]
src_bin = torch.cat([question_toks, binarize(src)])
src_tokens[i].append(src_bin)
src_lengths[i].append(len(src_bin))
assert all(
len(src_tokens[0]) == len(src_tokens[i])
for i in range(self.args.num_classes)
)
assert len(src_tokens[0]) == len(src_lengths[0])
assert len(labels) == 0 or len(labels) == len(src_tokens[0])
for i in range(self.args.num_classes):
src_lengths[i] = np.array(src_lengths[i])
src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
src_lengths[i] = ListDataset(src_lengths[i])
dataset = {
"id": IdDataset(),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens[0], reduce=True),
}
for i in range(self.args.num_classes):
dataset.update(
{
"net_input{}".format(i + 1): {
"src_tokens": RightPadDataset(
src_tokens[i],
pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths[i],
}
}
)
if len(labels) > 0:
dataset.update({"target": RawLabelDataset(labels)})
dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
with data_utils.numpy_seed(self.args.seed):
dataset = SortDataset(
dataset,
# shuffle
sort_order=[np.random.permutation(len(dataset))],
)
print("| Loaded {} with {} samples".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
"sentence_classification_head",
num_classes=1,
)
return model
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
| 6,124 | 31.068063 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/roberta/commonsense_qa/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import commonsense_qa_task # noqa
| 220 | 30.571429 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/roberta/wsc/wsc_task.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import tempfile
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import (
Dictionary,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
SortDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
from . import wsc_utils
@register_task("wsc")
class WSCTask(LegacyFairseqTask):
"""Task to finetune RoBERTa for Winograd Schemas."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data", metavar="DIR", help="path to data directory; we load <split>.jsonl"
)
parser.add_argument(
"--init-token",
type=int,
default=None,
help="add token at the beginning of each batch item",
)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol("<mask>")
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
# hack to handle GPT-2 BPE, which includes leading spaces
if args.bpe == "gpt2":
self.leading_space = True
self.trailing_space = False
else:
self.leading_space = False
self.trailing_space = True
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>")
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == "wsc", "Must set --criterion=wsc"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def binarize(self, s: str, append_eos: bool = False):
if self.tokenizer is not None:
s = self.tokenizer.encode(s)
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=append_eos,
add_if_not_exist=False,
).long()
if self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.bool)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start : mask_start + mask_size] = 1
return toks, mask
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
labels = []
for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
prefix = sentence[: pronoun_span.start].text
suffix = sentence[pronoun_span.end :].text_with_ws
# spaCy spans include trailing spaces, but we need to know about
# leading spaces for the GPT-2 BPE
leading_space = (
" " if sentence[: pronoun_span.start].text_with_ws.endswith(" ") else ""
)
trailing_space = " " if pronoun_span.text_with_ws.endswith(" ") else ""
# get noun phrases, excluding pronouns and anything overlapping with the query
cand_spans = wsc_utils.filter_noun_chunks(
wsc_utils.extended_noun_chunks(sentence),
exclude_pronouns=True,
exclude_query=query,
exact_match=False,
)
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_masks = [], []
for cand_span in cand_spans:
toks, mask = self.binarize_with_mask(
cand_span.text,
prefix,
suffix,
leading_space,
trailing_space,
)
cand_toks.append(toks)
cand_masks.append(mask)
# collate candidates
cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
assert cand_toks.size() == cand_masks.size()
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_masks)
candidate_lengths.append(cand_toks.size(1))
labels.append(label)
query_lengths = np.array(query_lengths)
query_tokens = ListDataset(query_tokens, query_lengths)
query_masks = ListDataset(query_masks, query_lengths)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
candidate_masks = ListDataset(candidate_masks, candidate_lengths)
labels = ListDataset(labels, [1] * len(labels))
dataset = {
"id": IdDataset(),
"query_tokens": query_tokens,
"query_masks": query_masks,
"candidate_tokens": candidate_tokens,
"candidate_masks": candidate_masks,
"labels": labels,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, sample_json):
with tempfile.NamedTemporaryFile(buffering=0) as h:
h.write((json.dumps(sample_json) + "\n").encode("utf-8"))
dataset = self.load_dataset(
"disambiguate_pronoun",
data_path=h.name,
return_only=True,
)
return dataset
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
sample_json = wsc_utils.convert_sentence_to_json(sentence)
dataset = self.build_dataset_for_inference(sample_json)
sample = dataset.collater([dataset[0]])
if use_cuda:
sample = utils.move_to_cuda(sample)
def get_masked_input(tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask.bool()] = self.mask
return masked_tokens
def get_lprobs(tokens, mask):
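            # average log-probability the masked LM assigns to the original
            # tokens at the masked span positions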
logits, _ = model(src_tokens=get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
cand_lprobs = get_lprobs(
sample["candidate_tokens"][0],
sample["candidate_masks"][0],
)
if sample["query_tokens"][0] is not None:
query_lprobs = get_lprobs(
sample["query_tokens"][0].unsqueeze(0),
sample["query_masks"][0].unsqueeze(0),
)
return (query_lprobs >= cand_lprobs).all().item() == 1
else:
best_idx = cand_lprobs.argmax().item()
full_cand = sample["candidate_tokens"][0][best_idx]
mask = sample["candidate_masks"][0][best_idx]
toks = full_cand[mask.bool()]
return self.bpe.decode(self.source_dictionary.string(toks)).strip()
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
@register_task("winogrande")
class WinograndeTask(WSCTask):
"""
Task for WinoGrande dataset. Efficient implementation for Winograd schema
tasks with exactly two candidates, one of which is correct.
"""
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == "winogrande", "Must set --criterion=winogrande"
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
print("| dictionary: {} types".format(len(vocab)))
return cls(args, vocab)
def load_dataset(
self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + ".jsonl")
if not os.path.exists(data_path):
raise FileNotFoundError("Cannot find data: {}".format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == "test"))
for sample in itr:
sentence, pronoun_span, query, cand_text = sample
prefix = sentence[: pronoun_span[0]].rstrip()
suffix = sentence[pronoun_span[1] :]
leading_space = " " if sentence[: pronoun_span[0]].endswith(" ") else ""
trailing_space = ""
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query,
prefix,
suffix,
leading_space,
trailing_space,
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_mask = self.binarize_with_mask(
cand_text,
prefix,
suffix,
leading_space,
trailing_space,
)
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_mask)
candidate_lengths.append(cand_toks.size(0))
query_lengths = np.array(query_lengths)
def get_pad_dataset_fn(tokens, length, pad_idx):
return PadDataset(
ListDataset(tokens, length),
pad_idx=pad_idx,
left_pad=False,
)
query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = get_pad_dataset_fn(
candidate_tokens, candidate_lengths, self.vocab.pad()
)
candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
dataset = {
"id": IdDataset(),
"query_tokens": query_tokens,
"query_masks": query_masks,
"candidate_tokens": candidate_tokens,
"candidate_masks": candidate_masks,
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
| 13,524 | 32.644279 | 90 |
py
|
sign-topic
|
sign-topic-main/examples/roberta/wsc/wsc_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from functools import lru_cache
def convert_sentence_to_json(sentence):
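    # Expected markup (illustrative): the candidate antecedent is wrapped in
    # underscores and the pronoun in square brackets, e.g.
    #   "The _trophy_ does not fit into the suitcase because [it] is too big."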
if "_" in sentence:
prefix, rest = sentence.split("_", 1)
query, rest = rest.split("_", 1)
query_index = len(prefix.rstrip().split(" "))
else:
query, query_index = None, None
prefix, rest = sentence.split("[", 1)
pronoun, rest = rest.split("]", 1)
pronoun_index = len(prefix.rstrip().split(" "))
sentence = sentence.replace("_", "").replace("[", "").replace("]", "")
return {
"idx": 0,
"text": sentence,
"target": {
"span1_index": query_index,
"span1_text": query,
"span2_index": pronoun_index,
"span2_text": pronoun,
},
}
def extended_noun_chunks(sentence):
noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks}
np_start, cur_np = 0, "NONE"
for i, token in enumerate(sentence):
np_type = token.pos_ if token.pos_ in {"NOUN", "PROPN"} else "NONE"
if np_type != cur_np:
if cur_np != "NONE":
noun_chunks.add((np_start, i))
if np_type != "NONE":
np_start = i
cur_np = np_type
if cur_np != "NONE":
noun_chunks.add((np_start, len(sentence)))
return [sentence[s:e] for (s, e) in sorted(noun_chunks)]
def find_token(sentence, start_pos):
found_tok = None
for tok in sentence:
if tok.idx == start_pos:
found_tok = tok
break
return found_tok
def find_span(sentence, search_text, start=0):
search_text = search_text.lower()
for tok in sentence[start:]:
remainder = sentence[tok.i :].text.lower()
if remainder.startswith(search_text):
len_to_consume = len(search_text)
start_idx = tok.idx
for next_tok in sentence[tok.i :]:
end_idx = next_tok.idx + len(next_tok.text)
if end_idx - start_idx == len_to_consume:
span = sentence[tok.i : next_tok.i + 1]
return span
return None
@lru_cache(maxsize=1)
def get_detokenizer():
from sacremoses import MosesDetokenizer
detok = MosesDetokenizer(lang="en")
return detok
@lru_cache(maxsize=1)
def get_spacy_nlp():
import en_core_web_lg
nlp = en_core_web_lg.load()
return nlp
def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False):
detok = get_detokenizer()
nlp = get_spacy_nlp()
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
if positive_only and "label" in sample and not sample["label"]:
# only consider examples where the query is correct
continue
target = sample["target"]
# clean up the query
query = target["span1_text"]
if query is not None:
if "\n" in query:
continue
if query.endswith(".") or query.endswith(","):
query = query[:-1]
# split tokens
tokens = sample["text"].split(" ")
def strip_pronoun(x):
return x.rstrip('.,"')
# find the pronoun
pronoun_idx = target["span2_index"]
pronoun = strip_pronoun(target["span2_text"])
if strip_pronoun(tokens[pronoun_idx]) != pronoun:
# hack: sometimes the index is misaligned
if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun:
pronoun_idx += 1
else:
raise Exception("Misaligned pronoun!")
assert strip_pronoun(tokens[pronoun_idx]) == pronoun
# split tokens before and after the pronoun
before = tokens[:pronoun_idx]
after = tokens[pronoun_idx + 1 :]
# the GPT BPE attaches leading spaces to tokens, so we keep track
# of whether we need spaces before or after the pronoun
leading_space = " " if pronoun_idx > 0 else ""
trailing_space = " " if len(after) > 0 else ""
# detokenize
before = detok.detokenize(before, return_str=True)
pronoun = detok.detokenize([pronoun], return_str=True)
after = detok.detokenize(after, return_str=True)
# hack: when the pronoun ends in a period (or comma), move the
# punctuation to the "after" part
if pronoun.endswith(".") or pronoun.endswith(","):
after = pronoun[-1] + trailing_space + after
pronoun = pronoun[:-1]
# hack: when the "after" part begins with a comma or period, remove
# the trailing space
if after.startswith(".") or after.startswith(","):
trailing_space = ""
# parse sentence with spacy
sentence = nlp(before + leading_space + pronoun + trailing_space + after)
# find pronoun span
start = len(before + leading_space)
first_pronoun_tok = find_token(sentence, start_pos=start)
pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i)
assert pronoun_span.text == pronoun
if eval:
# convert to format where pronoun is surrounded by "[]" and
# query is surrounded by "_"
query_span = find_span(sentence, query)
query_with_ws = "_{}_{}".format(
query_span.text,
(" " if query_span.text_with_ws.endswith(" ") else ""),
)
pronoun_with_ws = "[{}]{}".format(
pronoun_span.text,
(" " if pronoun_span.text_with_ws.endswith(" ") else ""),
)
if query_span.start < pronoun_span.start:
first = (query_span, query_with_ws)
second = (pronoun_span, pronoun_with_ws)
else:
first = (pronoun_span, pronoun_with_ws)
second = (query_span, query_with_ws)
sentence = (
sentence[: first[0].start].text_with_ws
+ first[1]
+ sentence[first[0].end : second[0].start].text_with_ws
+ second[1]
+ sentence[second[0].end :].text
)
yield sentence, sample.get("label", None)
else:
yield sentence, pronoun_span, query, sample.get("label", None)
def winogrande_jsonl_iterator(input_fname, eval=False):
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
sentence, option1, option2 = (
sample["sentence"],
sample["option1"],
sample["option2"],
)
pronoun_span = (sentence.index("_"), sentence.index("_") + 1)
if eval:
query, cand = option1, option2
else:
query = option1 if sample["answer"] == "1" else option2
cand = option2 if sample["answer"] == "1" else option1
yield sentence, pronoun_span, query, cand
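# Illustrative note (added; field values are hypothetical, the format is
# inferred from the keys read above): a Winogrande jsonl record looks like
#   {"sentence": "The trophy didn't fit in the case because _ was too big.",
#    "option1": "the trophy", "option2": "the case", "answer": "1"}
# pronoun_span covers the "_" placeholder; in training mode query is the option
# selected by "answer" and cand is the other, while eval mode yields
# (option1, option2) unchanged.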
def filter_noun_chunks(
chunks, exclude_pronouns=False, exclude_query=None, exact_match=False
):
if exclude_pronouns:
chunks = [
np
for np in chunks
if (np.lemma_ != "-PRON-" and not all(tok.pos_ == "PRON" for tok in np))
]
if exclude_query is not None:
excl_txt = [exclude_query.lower()]
filtered_chunks = []
for chunk in chunks:
lower_chunk = chunk.text.lower()
found = False
for excl in excl_txt:
if (
not exact_match and (lower_chunk in excl or excl in lower_chunk)
) or lower_chunk == excl:
found = True
break
if not found:
filtered_chunks.append(chunk)
chunks = filtered_chunks
return chunks
| 8,352 | 33.516529 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/roberta/wsc/wsc_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from fairseq.data import encoders
@register_criterion("wsc")
class WSCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, "w")
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args.bpe)
self.tokenizer = encoders.build_tokenizer(args.tokenizer)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0)
parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0)
parser.add_argument(
"--wsc-cross-entropy",
action="store_true",
help="use cross entropy formulation instead of margin loss",
)
parser.add_argument(
"--save-predictions", metavar="FILE", help="file to save predictions to"
)
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def get_lprobs(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
def get_loss(self, query_lprobs, cand_lprobs):
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
-query_lprobs
+ self.args.wsc_margin_alpha
* (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0)
).sum()
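    # Worked example (added; numbers are hypothetical): with the default margin
    # loss, alpha=1.0, beta=0.0, query_lprobs=-1.2 and cand_lprobs=-0.8 give
    #   loss = 1.2 + max(0, -0.8 - (-1.2) + 0.0) = 1.6
    # i.e. the query score is raised and candidates scoring within the margin
    # are pushed down. With --wsc-cross-entropy the pair is instead scored as a
    # 2-way classification with the query as the gold class.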
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0.0, 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample["labels"]):
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"][i].unsqueeze(0),
sample["query_masks"][i].unsqueeze(0),
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"][i],
sample["candidate_masks"][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample["id"][i].item()
if self.prediction_h is not None:
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
nqueries = sum(log.get("nqueries", 0) for log in logging_outputs)
if nqueries > 0:
agg_output["accuracy"] = ncorrect / float(nqueries)
return agg_output
@register_criterion("winogrande")
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"],
sample["query_masks"],
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"],
sample["candidate_masks"],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample["query_tokens"].size(0)
ncorrect = pred.sum().item()
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": sample_size,
}
return loss, sample_size, logging_output
| 6,037 | 34.940476 | 87 |
py
|
sign-topic
|
sign-topic-main/examples/roberta/wsc/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import wsc_criterion # noqa
from . import wsc_task # noqa
| 245 | 29.75 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/__init__.py
|
from . import criterions, models, tasks # noqa
| 48 | 23.5 | 47 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/infer.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
import editdistance
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def add_asr_eval_argument(parser):
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
parser.add_argument(
"--wfstlm", default=None, help="wfstlm on dictonary output units"
)
parser.add_argument(
"--rnnt_decoding_type",
default="greedy",
help="wfstlm on dictonary\
output units",
)
try:
parser.add_argument(
"--lm-weight",
"--lm_weight",
type=float,
default=0.2,
help="weight for lm while interpolating with neural score",
)
except:
pass
parser.add_argument(
"--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
)
parser.add_argument(
"--w2l-decoder",
choices=["viterbi", "kenlm", "fairseqlm"],
help="use a w2l decoder",
)
parser.add_argument("--lexicon", help="lexicon for w2l decoder")
parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
parser.add_argument("--beam-threshold", type=float, default=25.0)
parser.add_argument("--beam-size-token", type=float, default=100)
parser.add_argument("--word-score", type=float, default=1.0)
parser.add_argument("--unk-weight", type=float, default=-math.inf)
parser.add_argument("--sil-weight", type=float, default=0.0)
parser.add_argument(
"--dump-emissions",
type=str,
default=None,
help="if present, dumps emissions into this file and exits",
)
parser.add_argument(
"--dump-features",
type=str,
default=None,
help="if present, dumps features into this file and exits",
)
parser.add_argument(
"--load-emissions",
type=str,
default=None,
help="if present, loads emissions from this file",
)
return parser
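# Example invocation (added; paths and values are hypothetical, flags are the
# ones defined above plus standard fairseq generation options used later in
# this file, e.g. --path, --gen-subset, --beam, --results-path):
#   python examples/speech_recognition/infer.py $DATA_DIR \
#       --path $MODEL.pt --gen-subset test --w2l-decoder kenlm \
#       --lexicon lexicon.txt --kenlm-model lm.bin --lm-weight 2.0 \
#       --beam 50 --results-path $OUT_DIR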
def check_args(args):
# assert args.path is not None, "--path required for generation!"
# assert args.results_path is not None, "--results_path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def get_dataset_itr(args, task, models):
return task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id),
file=res_files["hypo.units"],
)
print(
"{} ({}-{})".format(hyp_words, speaker, id),
file=res_files["hypo.words"],
)
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, args.post_process)
if res_files is not None:
print(
"{} ({}-{})".format(tgt_pieces, speaker, id),
file=res_files["ref.units"],
)
print(
"{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
)
if not args.quiet:
logger.info("HYPO:" + hyp_words)
logger.info("TARGET:" + tgt_words)
logger.info("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
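# Note (added): the pair returned here is accumulated into errs_t / lengths_t in
# main(), so WER is computed corpus-wide as 100 * sum(edit distances) /
# sum(reference lengths); e.g. 3 word errors against a 20-word reference
# contribute 3 and 20 to the running totals.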
def prepare_result_files(args):
def get_res_file(file_prefix):
if args.num_shards > 1:
file_prefix = f"{args.shard_id}_{file_prefix}"
path = os.path.join(
args.results_path,
"{}-{}-{}.txt".format(
file_prefix, os.path.basename(args.path), args.gen_subset
),
)
return open(path, "w", buffering=1)
if not args.results_path:
return None
return {
"hypo.words": get_res_file("hypo.word"),
"hypo.units": get_res_file("hypo.units"),
"ref.words": get_res_file("ref.word"),
"ref.units": get_res_file("ref.units"),
}
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
def apply_half(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.half)
return t
class ExistingEmissionsDecoder(object):
def __init__(self, decoder, emissions):
self.decoder = decoder
self.emissions = emissions
def generate(self, models, sample, **unused):
ids = sample["id"].cpu().numpy()
try:
emissions = np.stack(self.emissions[ids])
except:
print([x.shape for x in self.emissions[ids]])
raise Exception("invalid sizes")
emissions = torch.from_numpy(emissions)
return self.decoder.decode(emissions)
def main(args, task=None, model_state=None):
check_args(args)
use_fp16 = args.fp16
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 4000000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
logger.info("| decoding with criterion {}".format(args.criterion))
task = tasks.setup_task(args)
# Load ensemble
if args.load_emissions:
models, criterions = [], []
task.load_dataset(args.gen_subset)
else:
logger.info("| loading model(s) from {}".format(args.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
utils.split_paths(args.path, separator="\\"),
arg_overrides=ast.literal_eval(args.model_overrides),
task=task,
suffix=args.checkpoint_suffix,
strict=(args.checkpoint_shard_count == 1),
num_shards=args.checkpoint_shard_count,
state=model_state,
)
optimize_models(args, use_cuda, models)
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info(
"| {} {} {} examples".format(
args.data, args.gen_subset, len(task.dataset(args.gen_subset))
)
)
# hack to pass transitions to W2lDecoder
if args.criterion == "asg_loss":
raise NotImplementedError("asg_loss is currently not supported")
# trans = criterions[0].asg.trans.data
# args.asg_transitions = torch.flatten(trans).tolist()
# Load dataset (possibly sharded)
itr = get_dataset_itr(args, task, models)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(args):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, task.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, task.target_dictionary)
elif w2l_decoder == "fairseqlm":
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(args, task.target_dictionary)
else:
print(
"only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
)
# please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
generator = build_generator(args)
if args.load_emissions:
generator = ExistingEmissionsDecoder(
generator, np.load(args.load_emissions, allow_pickle=True)
)
logger.info("loaded emissions from " + args.load_emissions)
num_sentences = 0
if args.results_path is not None and not os.path.exists(args.results_path):
os.makedirs(args.results_path)
max_source_pos = (
utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
),
)
if max_source_pos is not None:
max_source_pos = max_source_pos[0]
        if max_source_pos is not None:
            max_source_pos = max_source_pos[0] - 1
if args.dump_emissions:
emissions = {}
if args.dump_features:
features = {}
models[0].bert.proj = None
else:
res_files = prepare_result_files(args)
errs_t = 0
lengths_t = 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if use_fp16:
sample = utils.apply_to_sample(apply_half, sample)
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
gen_timer.start()
if args.dump_emissions:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
emm = emm.transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
emissions[id.item()] = emm[i]
continue
elif args.dump_features:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
padding = (
encoder_out["encoder_padding_mask"][i].cpu().numpy()
if encoder_out["encoder_padding_mask"] is not None
else None
)
features[id.item()] = (feat[i], padding)
continue
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
speaker = None
# id = task.dataset(args.gen_subset).ids[int(sample_id)]
id = sample_id
toks = (
sample["target"][i, :]
if "target_label" not in sample
else sample["target_label"][i, :]
)
target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
# Process top predictions
errs, length = process_predictions(
args,
hypos[i],
None,
tgt_dict,
target_tokens,
res_files,
speaker,
id,
)
errs_t += errs
lengths_t += length
wps_meter.update(num_generated_tokens)
t.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
wer = None
if args.dump_emissions:
emm_arr = []
for i in range(len(emissions)):
emm_arr.append(emissions[i])
np.save(args.dump_emissions, emm_arr)
logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
elif args.dump_features:
feat_arr = []
for i in range(len(features)):
feat_arr.append(features[i])
np.save(args.dump_features, feat_arr)
logger.info(f"saved {len(features)} emissions to {args.dump_features}")
else:
if lengths_t > 0:
wer = errs_t * 100.0 / lengths_t
logger.info(f"WER: {wer}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
"sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
return task, wer
def make_parser():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
return parser
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| 14,677 | 32.588101 | 111 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/w2l_decoder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Flashlight decoders.
"""
import gc
import itertools as it
import os.path as osp
from typing import List
import warnings
from collections import deque, namedtuple
import numpy as np
import torch
from examples.speech_recognition.data.replabels import unpack_replabels
from fairseq import tasks
from fairseq.utils import apply_to_sample
from omegaconf import open_dict
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
try:
from flashlight.lib.text.dictionary import create_word_dict, load_words
from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from flashlight.lib.text.decoder import (
CriterionType,
LexiconDecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
)
except:
warnings.warn(
"flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
# criterion-specific init
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
if "<sep>" in tgt_dict.indices:
self.silence = tgt_dict.index("<sep>")
elif "|" in tgt_dict.indices:
self.silence = tgt_dict.index("|")
else:
self.silence = tgt_dict.eos()
self.asg_transitions = None
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
model = models[0]
encoder_out = model(**encoder_input)
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out) # no need to normalize emissions
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
return torch.LongTensor(list(idxs))
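    # Worked example (added): with blank index b, a CTC frame sequence
    #   [b, 5, 5, b, 5, 7, 7, b]
    # is first collapsed over repeats to [b, 5, b, 5, 7, b] and then stripped of
    # blanks, giving tokens [5, 5, 7]; the blank between the two 5s is what keeps
    # them as separate tokens.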
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
if args.lexicon:
self.lexicon = load_words(args.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
self.unit_lm,
)
else:
assert args.unit_lm, "lexicon free decoding can only be done with a unit language model"
from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def get_timesteps(self, token_idxs: List[int]) -> List[int]:
"""Returns frame numbers corresponding to every non-blank token.
Parameters
----------
token_idxs : List[int]
IDs of decoded tokens.
Returns
-------
List[int]
Frame numbers corresponding to every non-blank token.
"""
timesteps = []
for i, token_idx in enumerate(token_idxs):
if token_idx == self.blank:
continue
if i == 0 or token_idx != token_idxs[i-1]:
timesteps.append(i)
return timesteps
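    # Worked example (added): for token_idxs = [b, 5, 5, b, 7] with blank b, the
    # returned frames are [1, 4] -- frame 2 repeats token 5 and frames 0 and 3
    # are blanks, so only the first frame of each non-blank run is kept.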
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"timesteps": self.get_timesteps(result.tokens),
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
def __init__(self, dictionary, model):
LM.__init__(self)
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing):
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(self, state: LMState, token_index: int, no_cache: bool = False):
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
                         (this can be a lexicon index, in which case the LM must
                         store the mapping between lexicon and LM indices, or the
                         LM index of the word itself)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size):
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
def finish(self, state: LMState):
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self):
self.states = {}
self.stateq = deque()
gc.collect()
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(args.kenlm_model)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
assert args.unit_lm, "lexicon free decoding can only be done with a unit language model"
from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
def idx_to_word(idx):
if self.unit_lm:
return self.idx_to_wrd[idx]
else:
return self.word_dict[idx]
def make_hypo(result):
hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
if self.lexicon:
hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
| 17,396 | 34.722793 | 171 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/criterions/cross_entropy_acc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
            model: model whose log-probs have shape N, T, D, i.e.
                batchsize, timesteps, dimensions
            sample: batch whose targets have shape N, T, i.e. batchsize, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this Criterion will only work with LSTMEncoderModels or
FairseqModels which have decoder, or Models which return TorchTensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of validate tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
            # in this branch sample_size is nsentences, so "loss" above is the
            # per-sentence loss and "nll_loss" is the per-output-token loss
return agg_output
| 5,372 | 40.015267 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/criterions/ASG_loss.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from examples.speech_recognition.data.replabels import pack_replabels
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(
self,
task,
silence_token,
asg_transitions_init,
max_replabel,
linseg_updates,
hide_linseg_messages,
):
from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode
super().__init__(task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(silence_token)
if silence_token in self.tgt_dict
else None
)
self.max_replabel = max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = linseg_updates
self.linseg_message_state = "none" if hide_linseg_messages else "start"
@classmethod
def build_criterion(cls, args, task):
return cls(
task,
args.silence_token,
args.asg_transitions_init,
args.max_replabel,
args.linseg_updates,
args.hide_linseg_messages,
)
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
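    # Behaviour sketch (added; indices are hypothetical): with eos=2 and
    # silence=4,
    #   [8, 9, 2] -> [8, 9, 4]  (trailing eos replaced by silence)
    #   [8, 4, 2] -> [8, 4]     (silence already precedes eos, eos is dropped)
    #   [8, 9]    -> [8, 9]     (no trailing eos, returned unchanged)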
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
| 5,870 | 33.333333 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/criterions/__init__.py
|
import importlib
import os
# ASG loss requires flashlight bindings
files_to_skip = set()
try:
import flashlight.lib.sequence.criterion
except ImportError:
files_to_skip.add("ASG_loss.py")
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_recognition.criterions." + criterion_name
)
| 510 | 27.388889 | 87 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/models/vggtransformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LinearizedConvolution,
TransformerDecoderLayer,
TransformerEncoderLayer,
VGGBlock,
)
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
"""
Transformers with convolutional context for ASR
https://arxiv.org/abs/1904.11660
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock:
[(out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
use_layer_norm), ...])
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help=""""
a tuple containing the configuration of the encoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]')
""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension, can be None. If specified, projecting the
transformer output to the specified dimension""",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--tgt-embed-dim",
type=int,
metavar="N",
help="embedding dimension of the decoder target tokens",
)
parser.add_argument(
"--transformer-dec-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the decoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--conv-dec-config",
type=str,
metavar="EXPR",
help="""
an array of tuples for the decoder 1-D convolution config
[(out_channels, conv_kernel_size, use_layer_norm), ...]""",
)
@classmethod
def build_encoder(cls, args, task):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, task):
return TransformerDecoder(
dictionary=task.target_dictionary,
embed_dim=args.tgt_embed_dim,
transformer_config=eval(args.transformer_dec_config),
conv_config=eval(args.conv_dec_config),
encoder_output_dim=args.enc_output_dim,
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
base_architecture(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN
# True: apply layerNorm before (dropout + residual) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLU
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
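# Example (added; values are illustrative): these one-liner configs are what the
# --vggblock-enc-config / --transformer-enc-config / --conv-dec-config flags
# above are eval()'d from, e.g.
#   --vggblock-enc-config    "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
#   --transformer-enc-config "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
#   --conv-dec-config        "((256, 3, True),) * 2"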
# TODO: replace transformer encoder config from one-liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = None
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args
class VGGTransformerEncoder(FairseqEncoder):
"""VGG + Transformer encoder"""
def __init__(
self,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
"""constructor for VGGTransformerEncoder
Args:
- input_feat_per_channel: feature dim (not including stacked,
just base feature)
        - in_channels: # input channels (e.g., if you stack 8 feature vectors
            together, this is 8)
- vggblock_config: configuration of vggblock, see comments on
DEFAULT_ENC_VGGBLOCK_CONFIG
- transformer_config: configuration of transformer layer, see comments
on DEFAULT_ENC_TRANSFORMER_CONFIG
- encoder_output_dim: final transformer output embedding dimension
- transformer_context: (left, right) if set, self-attention will be focused
on (t-left, t+right)
- transformer_sampling: an iterable of int, must match with
len(transformer_config), transformer_sampling[i] indicates sampling
            factor for i-th transformer layer, after multihead att and feedforward
part
"""
super().__init__(None)
self.num_vggblocks = 0
if vggblock_config is not None:
if not isinstance(vggblock_config, Iterable):
raise ValueError("vggblock_config is not iterable")
self.num_vggblocks = len(vggblock_config)
self.conv_layers = nn.ModuleList()
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
self.pooling_kernel_sizes = []
if vggblock_config is not None:
for _, config in enumerate(vggblock_config):
(
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
layer_norm,
) = config
self.conv_layers.append(
VGGBlock(
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim=input_feat_per_channel,
layer_norm=layer_norm,
)
)
self.pooling_kernel_sizes.append(pooling_kernel_size)
in_channels = out_channels
input_feat_per_channel = self.conv_layers[-1].output_dim
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim
)
# transformer_input_dim is the output dimension of VGG part
self.validate_transformer_config(transformer_config)
self.transformer_context = self.parse_transformer_context(transformer_context)
self.transformer_sampling = self.parse_transformer_sampling(
transformer_sampling, len(transformer_config)
)
self.transformer_layers = nn.ModuleList()
if transformer_input_dim != transformer_config[0][0]:
self.transformer_layers.append(
Linear(transformer_input_dim, transformer_config[0][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.transformer_layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[i])
)
)
self.encoder_output_dim = encoder_output_dim
self.transformer_layers.extend(
[
Linear(transformer_config[-1][0], encoder_output_dim),
LayerNorm(encoder_output_dim),
]
)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
x = x.transpose(1, 2).contiguous()
# (B, C, T, feat)
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1)
x = x.contiguous().view(output_seq_len, bsz, -1)
input_lengths = src_lengths.clone()
for s in self.pooling_kernel_sizes:
input_lengths = (input_lengths.float() / s).ceil().long()
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)
transformer_layer_idx = 0
for layer_idx in range(len(self.transformer_layers)):
if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
x = self.transformer_layers[layer_idx](
x, encoder_padding_mask, attn_mask
)
if self.transformer_sampling[transformer_layer_idx] != 1:
sampling_factor = self.transformer_sampling[transformer_layer_idx]
x, encoder_padding_mask, attn_mask = self.slice(
x, encoder_padding_mask, attn_mask, sampling_factor
)
transformer_layer_idx += 1
else:
x = self.transformer_layers[layer_idx](x)
        # encoder_padding_mask is a (T x B) tensor, its [t, b] elements indicate
# whether encoder_output[t, b] is valid or not (valid=0, invalid=1)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask.t()
if encoder_padding_mask is not None
else None,
# (B, T) --> (T, B)
}
def infer_conv_output_dim(self, in_channels, input_dim):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def validate_transformer_config(self, transformer_config):
for config in transformer_config:
input_dim, num_heads = config[:2]
if input_dim % num_heads != 0:
msg = (
"ERROR in transformer config {}: ".format(config)
+ "input dimension {} ".format(input_dim)
+ "not dividable by number of heads {}".format(num_heads)
)
raise ValueError(msg)
def parse_transformer_context(self, transformer_context):
"""
transformer_context can be the following:
- None; indicates no context is used, i.e.,
transformer can access full context
- a tuple/list of two int; indicates left and right context,
any number <0 indicates infinite context
* e.g., (5, 6) indicates that for query at x_t, transformer can
access [t-5, t+6] (inclusive)
* e.g., (-1, 6) indicates that for query at x_t, transformer can
access [0, t+6] (inclusive)
"""
if transformer_context is None:
return None
if not isinstance(transformer_context, Iterable):
raise ValueError("transformer context must be Iterable if it is not None")
if len(transformer_context) != 2:
raise ValueError("transformer context must have length 2")
left_context = transformer_context[0]
if left_context < 0:
left_context = None
right_context = transformer_context[1]
if right_context < 0:
right_context = None
if left_context is None and right_context is None:
return None
return (left_context, right_context)
def parse_transformer_sampling(self, transformer_sampling, num_layers):
"""
parsing transformer sampling configuration
Args:
- transformer_sampling, accepted input:
* None, indicating no sampling
* an Iterable with int (>0) as element
- num_layers, expected number of transformer layers, must match with
the length of transformer_sampling if it is not None
Returns:
- A tuple with length num_layers
"""
if transformer_sampling is None:
return (1,) * num_layers
if not isinstance(transformer_sampling, Iterable):
raise ValueError(
"transformer_sampling must be an iterable if it is not None"
)
if len(transformer_sampling) != num_layers:
raise ValueError(
"transformer_sampling {} does not match with the number "
"of layers {}".format(transformer_sampling, num_layers)
)
for layer, value in enumerate(transformer_sampling):
if not isinstance(value, int):
raise ValueError("Invalid value in transformer_sampling: ")
if value < 1:
raise ValueError(
"{} layer's subsampling is {}.".format(layer, value)
+ " This is not allowed! "
)
return transformer_sampling
def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
"""
embedding is a (T, B, D) tensor
padding_mask is a (B, T) tensor or None
attn_mask is a (T, T) tensor or None
"""
embedding = embedding[::sampling_factor, :, :]
if padding_mask is not None:
padding_mask = padding_mask[:, ::sampling_factor]
if attn_mask is not None:
attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
return embedding, padding_mask, attn_mask
def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
"""
create attention mask according to sequence lengths and transformer
context
Args:
- input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
the length of b-th sequence
- subsampling_factor: int
            * Note that left_context and right_context are specified at the
              input frame level, while the input to the transformer may already
              have gone through subsampling (e.g., striding in the vggblock),
              so we use subsampling_factor to scale the left/right context
Return:
- a (T, T) binary tensor or None, where T is max(input_lengths)
* if self.transformer_context is None, None
* if left_context is None,
* attn_mask[t, t + right_context + 1:] = 1
* others = 0
* if right_context is None,
* attn_mask[t, 0:t - left_context] = 1
* others = 0
            * otherwise
* attn_mask[t, t - left_context: t + right_context + 1] = 0
* others = 1
"""
if self.transformer_context is None:
return None
maxT = torch.max(input_lengths).item()
attn_mask = torch.zeros(maxT, maxT)
left_context = self.transformer_context[0]
right_context = self.transformer_context[1]
if left_context is not None:
left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
if right_context is not None:
right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
for t in range(maxT):
if left_context is not None:
st = 0
en = max(st, t - left_context)
attn_mask[t, st:en] = 1
if right_context is not None:
st = t + right_context + 1
st = min(st, maxT - 1)
attn_mask[t, st:] = 1
return attn_mask.to(input_lengths.device)
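    # Worked example (added): with a transformer_context of (2, -1), parsed to
    # (2, None), i.e. a left context of 2 frames and unlimited right context,
    # and max(input_lengths)=4, only positions more than 2 steps in the past
    # are masked (1 = blocked):
    #   rows t=0..2: [0, 0, 0, 0]
    #   row  t=3:    [1, 0, 0, 0]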
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs.
Default: ``False``
left_pad (bool, optional): whether the input is left-padded. Default:
``False``
"""
def __init__(
self,
dictionary,
embed_dim=512,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
conv_config=DEFAULT_DEC_CONV_CONFIG,
encoder_output_dim=512,
):
super().__init__(dictionary)
vocab_size = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)
self.conv_layers = nn.ModuleList()
for i in range(len(conv_config)):
out_channels, kernel_size, layer_norm = conv_config[i]
if i == 0:
conv_layer = LinearizedConv1d(
embed_dim, out_channels, kernel_size, padding=kernel_size - 1
)
else:
conv_layer = LinearizedConv1d(
conv_config[i - 1][0],
out_channels,
kernel_size,
padding=kernel_size - 1,
)
self.conv_layers.append(conv_layer)
if layer_norm:
self.conv_layers.append(nn.LayerNorm(out_channels))
self.conv_layers.append(nn.ReLU())
self.layers = nn.ModuleList()
if conv_config[-1][0] != transformer_config[0][0]:
self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.layers.append(
TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[i])
)
)
self.fc_out = Linear(transformer_config[-1][0], vocab_size)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
target_padding_mask = (
(prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
if incremental_state is None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens
x = self.embed_tokens(prev_output_tokens)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
for layer in self.conv_layers:
if isinstance(layer, LinearizedConvolution):
x = layer(x, incremental_state)
else:
x = layer(x)
# B x T x C -> T x B x C
x = self._transpose_if_inference(x, incremental_state)
# decoder layers
for layer in self.layers:
if isinstance(layer, TransformerDecoderLayer):
x, *_ = layer(
x,
(encoder_out["encoder_out"] if encoder_out is not None else None),
                    (
                        encoder_out["encoder_padding_mask"].t()
                        if encoder_out is not None
                        and encoder_out["encoder_padding_mask"] is not None
                        else None
                    ),
incremental_state,
self_attn_mask=(
self.buffered_future_mask(x)
if incremental_state is None
else None
),
self_attn_padding_mask=(
target_padding_mask if incremental_state is None else None
),
)
else:
x = layer(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.fc_out(x)
return x, None
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
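    # Illustrative example (added comment, not part of the original code):
    # for a length-3 target, buffered_future_mask returns
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # so that during training position t cannot attend to positions > t.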
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def _transpose_if_inference(self, x, incremental_state):
if incremental_state:
x = x.transpose(0, 1)
return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock
[(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help="""
    a tuple containing the configuration of the transformer layers, one
    tuple per layer:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ]""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="encoder output dimension, projecting the LSTM output",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--transformer-context",
type=str,
metavar="EXPR",
help="""
either None or a tuple of two ints, indicating left/right context a
transformer can have access to""",
)
parser.add_argument(
"--transformer-sampling",
type=str,
metavar="EXPR",
help="""
either None or a tuple of ints, indicating sampling factor in each layer""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
base_architecture_enconly(args)
encoder = VGGTransformerEncoderOnly(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
transformer_context=eval(args.transformer_context),
transformer_sampling=eval(args.transformer_sampling),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (T, B, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (T, B, D) tensor
        # we need to transpose to get a (B, T, D) tensor
lprobs = lprobs.transpose(0, 1).contiguous()
lprobs.batch_first = True
return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
def __init__(
self,
vocab_size,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
super().__init__(
input_feat_per_channel=input_feat_per_channel,
vggblock_config=vggblock_config,
transformer_config=transformer_config,
encoder_output_dim=encoder_output_dim,
in_channels=in_channels,
transformer_context=transformer_context,
transformer_sampling=transformer_sampling,
)
self.fc_out = Linear(self.encoder_output_dim, vocab_size)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
enc_out = super().forward(src_tokens, src_lengths)
x = self.fc_out(enc_out["encoder_out"])
# x = F.log_softmax(x, dim=-1)
        # Note: this line is not needed because model.get_normalized_probs
        # will call log_softmax
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B)
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
# nn.init.uniform_(m.weight, -0.1, 0.1)
# nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
# m.weight.data.uniform_(-0.1, 0.1)
# if bias:
# m.bias.data.uniform_(-0.1, 0.1)
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
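# Added note (not part of the original code): the initialization above mirrors
# the fconv-style scheme used elsewhere in fairseq,
#   std = sqrt(4 * (1 - dropout) / (kernel_size * in_channels)).
# For example, with in_channels=512, kernel_size=3 and dropout=0 this gives
# std = sqrt(4 / 1536) ~= 0.051, before weight normalization is applied.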
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
# seq2seq models
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.transformer_dec_config = getattr(
args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
args.transformer_context = getattr(args, "transformer_context", "None")
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
)
# Size estimations:
# Encoder:
# - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3*3 = 258K
# Transformer:
# - input dimension adapter: 2560 x 512 -> 1.31M
# - transformer_layers (x12) --> 37.74M
# * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
# * FFN weight: 512*2048*2 = 2.097M
# - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
# - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
# - transformer_layer: (x6) --> 25.16M
# * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
# * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
# * FFN: 512*2048*2 = 2.097M
# Final FC:
# - FC: 512*5000 = 2.56M (assuming vocab size 5K)
# In total:
# ~68 M
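# Added sanity check (comments only, not part of the original code):
#   per encoder layer ~ 4*512*512 (attention) + 2*512*2048 (FFN) ~= 3.15M,
#   so 12 encoder layers ~ 37.7M, matching the 37.74M figure above;
#   per decoder layer ~ 2*(4*512*512) + 2*512*2048 ~= 4.19M, so x6 ~= 25.2M.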
# CTC models
def base_architecture_enconly(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.transformer_context = getattr(args, "transformer_context", "None")
args.transformer_sampling = getattr(args, "transformer_sampling", "None")
@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
# vggtransformer_1 is the same as vggtransformer_enc_big, except the number
# of layers is increased to 16
    # keep it here for backward compatibility purposes
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
| 37,260 | 35.494613 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/models/w2l_conv_glu_enc.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--conv-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one conv layer
[(out_channels, kernel_size, padding, dropout), ...]
""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
encoder = W2lConvGluEncoder(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
in_channels=args.in_channels,
conv_enc_config=eval(conv_enc_config),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = False
return lprobs
class W2lConvGluEncoder(FairseqEncoder):
def __init__(
self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
):
super().__init__(None)
self.input_dim = input_feat_per_channel
if in_channels != 1:
raise ValueError("only 1 input channel is currently supported")
self.conv_layers = nn.ModuleList()
self.linear_layers = nn.ModuleList()
self.dropouts = []
cur_channels = input_feat_per_channel
for out_channels, kernel_size, padding, dropout in conv_enc_config:
layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
self.conv_layers.append(nn.utils.weight_norm(layer))
self.dropouts.append(
FairseqDropout(dropout, module_name=self.__class__.__name__)
)
if out_channels % 2 != 0:
raise ValueError("odd # of out_channels is incompatible with GLU")
cur_channels = out_channels // 2 # halved by GLU
for out_channels in [2 * cur_channels, vocab_size]:
layer = nn.Linear(cur_channels, out_channels)
layer.weight.data.mul_(math.sqrt(3))
self.linear_layers.append(nn.utils.weight_norm(layer))
cur_channels = out_channels // 2
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
B, T, _ = src_tokens.size()
x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
x = F.glu(x, dim=1)
x = self.dropouts[layer_idx](x)
        x = x.transpose(1, 2).contiguous()  # (B, T, C): C = last conv out_channels // 2 (908 with the default config)
x = self.linear_layers[0](x)
x = F.glu(x, dim=2)
x = self.dropouts[-1](x)
x = self.linear_layers[1](x)
assert x.size(0) == B
assert x.size(1) == T
encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
# need to debug this -- find a simpler/elegant way in pytorch APIs
encoder_padding_mask = (
torch.arange(T).view(1, T).expand(B, -1).to(x.device)
>= src_lengths.view(B, 1).expand(-1, T)
).t() # (B x T) -> (T x B)
return {
"encoder_out": encoder_out, # (T, B, vocab_size)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
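    # Added note (not part of the original code): an equivalent, arguably
    # simpler way to build the padding mask above would be along the lines of
    #   (torch.arange(T, device=x.device)[None, :] >= src_lengths[:, None]).t()
    # which marks every position beyond an utterance's true length; the
    # original expand-based formulation is kept since its behaviour is the same.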
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
| 6,078 | 33.151685 | 87 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/models/__init__.py
|
import importlib
import os
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module("examples.speech_recognition.models." + model_name)
| 276 | 29.777778 | 83 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/datasets/asr_prep_json.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import concurrent.futures
import json
import multiprocessing
import os
from collections import namedtuple
from itertools import chain
import sentencepiece as spm
from fairseq.data import Dictionary
MILLISECONDS_TO_SECONDS = 0.001
def process_sample(aud_path, label, utt_id, sp, tgt_dict):
import torchaudio
input = {}
output = {}
si, ei = torchaudio.info(aud_path)
input["length_ms"] = int(
si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS
)
input["path"] = aud_path
    token = " ".join(sp.EncodeAsPieces(label))
ids = tgt_dict.encode_line(token, append_eos=False)
output["text"] = lable
output["token"] = token
output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids]))
return {utt_id: {"input": input, "output": output}}
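# Illustrative output (added comment, not part of the original code): for an
# utterance "spk1-0001" labelled "hello world", process_sample returns roughly
#   {"spk1-0001": {"input": {"length_ms": 2840, "path": ".../spk1-0001.wav"},
#                  "output": {"text": "hello world",
#                             "token": "▁hello ▁world",
#                             "tokenid": "41, 112"}}}
# where the exact pieces and ids depend on the sentencepiece model and the
# dictionary; the numbers above are made up.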
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--audio-dirs",
nargs="+",
default=["-"],
required=True,
help="input directories with audio files",
)
parser.add_argument(
"--labels",
required=True,
help="aggregated input labels with format <ID LABEL> per line",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument(
"--spm-model",
required=True,
help="sentencepiece model to use for encoding",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument(
"--dictionary",
required=True,
help="file to load fairseq dictionary from",
type=argparse.FileType("r", encoding="UTF-8"),
)
parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
parser.add_argument(
"--output",
required=True,
type=argparse.FileType("w"),
help="path to save json output",
)
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.spm_model.name)
tgt_dict = Dictionary.load(args.dictionary)
labels = {}
for line in args.labels:
(utt_id, label) = line.split(" ", 1)
labels[utt_id] = label
if len(labels) == 0:
raise Exception("No labels found in ", args.labels_path)
Sample = namedtuple("Sample", "aud_path utt_id")
samples = []
for path, _, files in chain.from_iterable(
os.walk(path) for path in args.audio_dirs
):
for f in files:
if f.endswith(args.audio_format):
if len(os.path.splitext(f)) != 2:
raise Exception("Expect <utt_id.extension> file name. Got: ", f)
utt_id = os.path.splitext(f)[0]
if utt_id not in labels:
continue
samples.append(Sample(os.path.join(path, f), utt_id))
utts = {}
num_cpu = multiprocessing.cpu_count()
with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
future_to_sample = {
executor.submit(
process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict
): s
for s in samples
}
for future in concurrent.futures.as_completed(future_to_sample):
try:
data = future.result()
except Exception as exc:
print("generated an exception: ", exc)
else:
utts.update(data)
json.dump({"utts": utts}, args.output, indent=4)
if __name__ == "__main__":
main()
| 3,775 | 28.968254 | 84 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/new/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/speech_recognition/new/infer.py
|
#!/usr/bin/env python -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import hashlib
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import editdistance
import torch
import torch.distributed as dist
from examples.speech_recognition.new.decoders.decoder_config import (
DecoderConfig,
FlashlightDecoderConfig,
)
from examples.speech_recognition.new.decoders.decoder import Decoder
from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
FairseqDataclass,
)
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.logging.progress_bar import BaseProgressBar
from fairseq.models.fairseq_model import FairseqModel
from omegaconf import OmegaConf
import hydra
from hydra.core.config_store import ConfigStore
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
config_path = Path(__file__).resolve().parent / "conf"
@dataclass
class DecodingConfig(DecoderConfig, FlashlightDecoderConfig):
unique_wer_file: bool = field(
default=False,
metadata={"help": "If set, use a unique file for storing WER"},
)
results_path: Optional[str] = field(
default=None,
metadata={
"help": "If set, write hypothesis and reference sentences into this directory"
},
)
@dataclass
class InferConfig(FairseqDataclass):
task: Any = None
decoding: DecodingConfig = DecodingConfig()
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler)
class InferenceProcessor:
cfg: InferConfig
def __init__(self, cfg: InferConfig) -> None:
self.cfg = cfg
self.task = tasks.setup_task(cfg.task)
models, saved_cfg = self.load_model_ensemble()
self.models = models
self.saved_cfg = saved_cfg
self.tgt_dict = self.task.target_dictionary
self.task.load_dataset(
self.cfg.dataset.gen_subset,
task_cfg=saved_cfg.task,
)
self.generator = Decoder(cfg.decoding, self.tgt_dict)
self.gen_timer = StopwatchMeter()
self.wps_meter = TimeMeter()
self.num_sentences = 0
self.total_errors = 0
self.total_length = 0
self.hypo_words_file = None
self.hypo_units_file = None
self.ref_words_file = None
self.ref_units_file = None
self.progress_bar = self.build_progress_bar()
def __enter__(self) -> "InferenceProcessor":
if self.cfg.decoding.results_path is not None:
self.hypo_words_file = self.get_res_file("hypo.word")
self.hypo_units_file = self.get_res_file("hypo.units")
self.ref_words_file = self.get_res_file("ref.word")
self.ref_units_file = self.get_res_file("ref.units")
return self
def __exit__(self, *exc) -> bool:
if self.cfg.decoding.results_path is not None:
self.hypo_words_file.close()
self.hypo_units_file.close()
self.ref_words_file.close()
self.ref_units_file.close()
return False
def __iter__(self) -> Any:
for sample in self.progress_bar:
if not self.cfg.common.cpu:
sample = utils.move_to_cuda(sample)
# Happens on the last batch.
if "net_input" not in sample:
continue
yield sample
def log(self, *args, **kwargs):
self.progress_bar.log(*args, **kwargs)
def print(self, *args, **kwargs):
self.progress_bar.print(*args, **kwargs)
def get_res_file(self, fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
if self.data_parallel_world_size > 1:
fname = f"{fname}.{self.data_parallel_rank}"
return open(fname, "w", buffering=1)
def merge_shards(self) -> None:
"""Merges all shard files into shard 0, then removes shard suffix."""
shard_id = self.data_parallel_rank
num_shards = self.data_parallel_world_size
if self.data_parallel_world_size > 1:
def merge_shards_with_root(fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
logger.info("Merging %s on shard %d", fname, shard_id)
base_fpath = Path(f"{fname}.0")
with open(base_fpath, "a") as out_file:
for s in range(1, num_shards):
shard_fpath = Path(f"{fname}.{s}")
with open(shard_fpath, "r") as in_file:
for line in in_file:
out_file.write(line)
shard_fpath.unlink()
shutil.move(f"{fname}.0", fname)
dist.barrier() # ensure all shards finished writing
if shard_id == (0 % num_shards):
merge_shards_with_root("hypo.word")
if shard_id == (1 % num_shards):
merge_shards_with_root("hypo.units")
if shard_id == (2 % num_shards):
merge_shards_with_root("ref.word")
if shard_id == (3 % num_shards):
merge_shards_with_root("ref.units")
dist.barrier()
def optimize_model(self, model: FairseqModel) -> None:
model.make_generation_fast_()
if self.cfg.common.fp16:
model.half()
if not self.cfg.common.cpu:
model.cuda()
def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]:
arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides)
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(self.cfg.common_eval.path, separator="\\"),
arg_overrides=arg_overrides,
task=self.task,
suffix=self.cfg.checkpoint.checkpoint_suffix,
strict=(self.cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=self.cfg.checkpoint.checkpoint_shard_count,
)
for model in models:
self.optimize_model(model)
return models, saved_cfg
def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None:
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.gen_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
).next_epoch_itr(shuffle=False)
def build_progress_bar(
self,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default_log_format: str = "tqdm",
) -> BaseProgressBar:
return progress_bar.progress_bar(
iterator=self.get_dataset_itr(),
log_format=self.cfg.common.log_format,
log_interval=self.cfg.common.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=self.cfg.common.tensorboard_logdir,
default_log_format=default_log_format,
)
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
def process_sentence(
self,
sample: Dict[str, Any],
hypo: Dict[str, Any],
sid: int,
batch_id: int,
) -> Tuple[int, int]:
speaker = None # Speaker can't be parsed from dataset.
if "target_label" in sample:
toks = sample["target_label"]
else:
toks = sample["target"]
toks = toks[batch_id, :]
# Processes hypothesis.
hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process)
# Processes target.
target_tokens = utils.strip_pad(toks, self.tgt_dict.pad())
tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu())
tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process)
if self.cfg.decoding.results_path is not None:
print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file)
print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file)
print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file)
print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file)
if not self.cfg.common_eval.quiet:
logger.info(f"HYPO: {hyp_words}")
logger.info(f"REF: {tgt_words}")
logger.info("---------------------")
hyp_words, tgt_words = hyp_words.split(), tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
def process_sample(self, sample: Dict[str, Any]) -> None:
self.gen_timer.start()
hypos = self.task.inference_step(
generator=self.generator,
models=self.models,
sample=sample,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
self.gen_timer.stop(num_generated_tokens)
self.wps_meter.update(num_generated_tokens)
for batch_id, sample_id in enumerate(sample["id"].tolist()):
errs, length = self.process_sentence(
sample=sample,
sid=sample_id,
batch_id=batch_id,
hypo=hypos[batch_id][0],
)
self.total_errors += errs
self.total_length += length
self.log({"wps": round(self.wps_meter.avg)})
if "nsentences" in sample:
self.num_sentences += sample["nsentences"]
else:
self.num_sentences += sample["id"].numel()
def log_generation_time(self) -> None:
logger.info(
"Processed %d sentences (%d tokens) in %.1fs %.2f "
"sentences per second, %.2f tokens per second)",
self.num_sentences,
self.gen_timer.n,
self.gen_timer.sum,
self.num_sentences / self.gen_timer.sum,
1.0 / self.gen_timer.avg,
)
def parse_wer(wer_file: Path) -> float:
with open(wer_file, "r") as f:
return float(f.readline().strip().split(" ")[1])
def get_wer_file(cfg: InferConfig) -> Path:
"""Hashes the decoding parameters to a unique file ID."""
base_path = "wer"
if cfg.decoding.results_path is not None:
base_path = os.path.join(cfg.decoding.results_path, base_path)
if cfg.decoding.unique_wer_file:
yaml_str = OmegaConf.to_yaml(cfg.decoding)
fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
return Path(f"{base_path}.{fid % 1000000}")
else:
return Path(base_path)
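# Added note (not part of the original code): with unique_wer_file=True the
# returned path has the form "wer.<md5(decoding-config YAML) % 1_000_000>", so
# different decoding configurations write to different files; otherwise it is
# simply "wer", optionally placed under decoding.results_path.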
def main(cfg: InferConfig) -> float:
"""Entry point for main processing logic.
Args:
cfg: The inferance configuration to use.
wer: Optional shared memory pointer for returning the WER. If not None,
the final WER value will be written here instead of being returned.
Returns:
The final WER if `wer` is None, otherwise None.
"""
yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg)
# Validates the provided configuration.
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 4000000
if not cfg.common.cpu and not torch.cuda.is_available():
raise ValueError("CUDA not found; set `cpu=True` to run without CUDA")
with InferenceProcessor(cfg) as processor:
for sample in processor:
processor.process_sample(sample)
processor.log_generation_time()
if cfg.decoding.results_path is not None:
processor.merge_shards()
errs_t, leng_t = processor.total_errors, processor.total_length
if cfg.common.cpu:
logger.warning("Merging WER requires CUDA.")
elif processor.data_parallel_world_size > 1:
stats = torch.LongTensor([errs_t, leng_t]).cuda()
dist.all_reduce(stats, op=dist.ReduceOp.SUM)
errs_t, leng_t = stats[0].item(), stats[1].item()
wer = errs_t * 100.0 / leng_t
if distributed_utils.is_master(cfg.distributed_training):
with open(wer_file, "w") as f:
f.write(
(
f"WER: {wer}\n"
f"err / num_ref_words = {errs_t} / {leng_t}\n\n"
f"{yaml_str}"
)
)
return wer
@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
reset_logging()
# logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))
wer = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
wer = parse_wer(get_wer_file(cfg))
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
logger.info("Word error rate: %.4f", wer)
if cfg.is_ax:
return wer, None
return wer
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
cli_main()
| 16,498 | 33.955508 | 103 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/new/decoders/decoder_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import Optional
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.dataclass.constants import ChoiceEnum
from omegaconf import MISSING
DECODER_CHOICES = ChoiceEnum(["viterbi", "kenlm", "fairseqlm"])
@dataclass
class DecoderConfig(FairseqDataclass):
type: DECODER_CHOICES = field(
default="viterbi",
metadata={"help": "The type of decoder to use"},
)
@dataclass
class FlashlightDecoderConfig(FairseqDataclass):
nbest: int = field(
default=1,
metadata={"help": "Number of decodings to return"},
)
unitlm: bool = field(
default=False,
metadata={"help": "If set, use unit language model"},
)
lmpath: str = field(
default=MISSING,
metadata={"help": "Language model for KenLM decoder"},
)
lexicon: Optional[str] = field(
default=None,
metadata={"help": "Lexicon for Flashlight decoder"},
)
beam: int = field(
default=50,
metadata={"help": "Number of beams to use for decoding"},
)
beamthreshold: float = field(
default=50.0,
metadata={"help": "Threshold for beam search decoding"},
)
beamsizetoken: Optional[int] = field(
default=None, metadata={"help": "Beam size to use"}
)
wordscore: float = field(
default=-1,
metadata={"help": "Word score for KenLM decoder"},
)
unkweight: float = field(
default=-math.inf,
metadata={"help": "Unknown weight for KenLM decoder"},
)
silweight: float = field(
default=0,
metadata={"help": "Silence weight for KenLM decoder"},
)
lmweight: float = field(
default=2,
metadata={"help": "Weight for LM while interpolating score"},
)
| 2,004 | 27.239437 | 69 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/new/decoders/decoder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Union
from fairseq.data.dictionary import Dictionary
from .decoder_config import DecoderConfig, FlashlightDecoderConfig
from .base_decoder import BaseDecoder
def Decoder(
cfg: Union[DecoderConfig, FlashlightDecoderConfig], tgt_dict: Dictionary
) -> BaseDecoder:
if cfg.type == "viterbi":
from .viterbi_decoder import ViterbiDecoder
return ViterbiDecoder(tgt_dict)
if cfg.type == "kenlm":
from .flashlight_decoder import KenLMDecoder
return KenLMDecoder(cfg, tgt_dict)
if cfg.type == "fairseqlm":
from .flashlight_decoder import FairseqLMDecoder
return FairseqLMDecoder(cfg, tgt_dict)
    raise NotImplementedError(f"Invalid decoder type: {cfg.type}")
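# Illustrative usage (added comment, not part of the original code):
#   decoder = Decoder(DecoderConfig(type="viterbi"), tgt_dict)
#   hypos = decoder.generate(models, sample)
# where tgt_dict is the task's target Dictionary, models is a list of loaded
# FairseqModel instances and sample is a batch produced by the task iterator.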
| 944 | 27.636364 | 76 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/new/decoders/base_decoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools as it
from typing import Any, Dict, List
import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.fairseq_model import FairseqModel
class BaseDecoder:
def __init__(self, tgt_dict: Dictionary) -> None:
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
if "<sep>" in tgt_dict.indices:
self.silence = tgt_dict.index("<sep>")
elif "|" in tgt_dict.indices:
self.silence = tgt_dict.index("|")
else:
self.silence = tgt_dict.eos()
def generate(
self, models: List[FairseqModel], sample: Dict[str, Any], **unused
) -> List[List[Dict[str, torch.LongTensor]]]:
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(
self,
models: List[FairseqModel],
encoder_input: Dict[str, Any],
) -> torch.FloatTensor:
model = models[0]
encoder_out = model(**encoder_input)
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out)
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor:
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank, idxs)
return torch.LongTensor(list(idxs))
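    # Illustrative example (added comment, not part of the original code):
    # with blank index 0, get_tokens([0, 5, 5, 0, 7, 7, 7]) first collapses the
    # repeated frames to [0, 5, 0, 7] and then drops the blanks, returning
    # tensor([5, 7]) -- the standard CTC collapse rule.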
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
raise NotImplementedError
| 2,093 | 32.238095 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/new/decoders/viterbi_decoder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from typing import List, Dict
from .base_decoder import BaseDecoder
class ViterbiDecoder(BaseDecoder):
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
def get_pred(e):
toks = e.argmax(dim=-1).unique_consecutive()
return toks[toks != self.blank]
return [[{"tokens": get_pred(x), "score": 0}] for x in emissions]
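# Added note (not part of the original code): this is greedy (best-path) CTC
# decoding; the per-frame argmax tokens are collapsed with unique_consecutive()
# and blank frames are dropped, so an argmax sequence such as
# [blank, a, a, blank, b, b] decodes to the token sequence [a, b].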
| 641 | 24.68 | 73 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/new/decoders/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/speech_recognition/new/decoders/flashlight_decoder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import os.path as osp
import warnings
from collections import deque, namedtuple
from typing import Any, Dict, Tuple
import numpy as np
import torch
from fairseq import tasks
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.fairseq_model import FairseqModel
from fairseq.utils import apply_to_sample
from omegaconf import open_dict, OmegaConf
from typing import List
from .decoder_config import FlashlightDecoderConfig
from .base_decoder import BaseDecoder
try:
from flashlight.lib.text.decoder import (
LM,
CriterionType,
DecodeResult,
KenLM,
LexiconDecoder,
LexiconDecoderOptions,
LexiconFreeDecoder,
LexiconFreeDecoderOptions,
LMState,
SmearingMode,
Trie,
)
from flashlight.lib.text.dictionary import create_word_dict, load_words
except ImportError:
warnings.warn(
"flashlight python bindings are required to use this functionality. "
"Please install from "
"https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
class KenLMDecoder(BaseDecoder):
def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
super().__init__(tgt_dict)
self.nbest = cfg.nbest
self.unitlm = cfg.unitlm
if cfg.lexicon:
self.lexicon = load_words(cfg.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for word, spellings in self.lexicon.items():
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{word} {spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
word_score=cfg.wordscore,
unk_score=cfg.unkweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unitlm,
)
else:
assert self.unitlm, "Lexicon-free decoding requires unit LM"
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def get_timesteps(self, token_idxs: List[int]) -> List[int]:
"""Returns frame numbers corresponding to every non-blank token.
Parameters
----------
token_idxs : List[int]
IDs of decoded tokens.
Returns
-------
List[int]
Frame numbers corresponding to every non-blank token.
"""
timesteps = []
for i, token_idx in enumerate(token_idxs):
if token_idx == self.blank:
continue
if i == 0 or token_idx != token_idxs[i-1]:
timesteps.append(i)
return timesteps
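    # Illustrative example (added comment, not part of the original code):
    # with blank index 0, get_timesteps([0, 5, 5, 0, 7]) returns [1, 4]: frame 1
    # starts the run of token 5 and frame 4 starts token 7, while repeated and
    # blank frames contribute no new timestep.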
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"timesteps": self.get_timesteps(result.tokens),
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple(
"FairseqLMState",
[
"prefix",
"incremental_state",
"probs",
],
)
class FairseqLM(LM):
def __init__(self, dictionary: Dictionary, model: FairseqModel) -> None:
super().__init__()
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
if torch.cuda.is_available():
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing: bool) -> LMState:
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(
self,
state: LMState,
token_index: int,
no_cache: bool = False,
    ) -> Tuple[LMState, float]:
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
                     (this can be a lexicon index, in which case the LM should
                      store the mapping between lexicon and LM indices, or the
                      LM index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size: int) -> None:
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
    def finish(self, state: LMState) -> Tuple[LMState, float]:
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self) -> None:
self.states = {}
self.stateq = deque()
gc.collect()
class FairseqLMDecoder(BaseDecoder):
def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
super().__init__(tgt_dict)
self.nbest = cfg.nbest
self.unitlm = cfg.unitlm
self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(cfg.lmpath, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
if not OmegaConf.is_dict(lm_args):
lm_args = OmegaConf.create(lm_args)
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(cfg.lmpath)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unitlm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
word_score=cfg.wordscore,
unk_score=cfg.unkweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unitlm,
)
else:
assert self.unitlm, "Lexicon-free decoding requires unit LM"
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(cfg.lmpath, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=cfg.beam,
beam_size_token=cfg.beamsizetoken or len(tgt_dict),
beam_threshold=cfg.beamthreshold,
lm_weight=cfg.lmweight,
sil_score=cfg.silweight,
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(
self,
emissions: torch.FloatTensor,
) -> List[List[Dict[str, torch.LongTensor]]]:
B, T, N = emissions.size()
hypos = []
def make_hypo(result: DecodeResult) -> Dict[str, Any]:
hypo = {
"tokens": self.get_tokens(result.tokens),
"score": result.score,
}
if self.lexicon:
hypo["words"] = [
self.idx_to_wrd[x] if self.unitlm else self.word_dict[x]
for x in result.words
if x >= 0
]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
| 14,746 | 33.136574 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/kaldi/kaldi_initializer.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import hydra
from hydra.core.config_store import ConfigStore
import logging
from omegaconf import MISSING, OmegaConf
import os
import os.path as osp
from pathlib import Path
import subprocess
from typing import Optional
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass import FairseqDataclass
script_dir = Path(__file__).resolve().parent
config_path = script_dir / "config"
logger = logging.getLogger(__name__)
@dataclass
class KaldiInitializerConfig(FairseqDataclass):
data_dir: str = MISSING
fst_dir: Optional[str] = None
in_labels: str = MISSING
out_labels: Optional[str] = None
wav2letter_lexicon: Optional[str] = None
lm_arpa: str = MISSING
kaldi_root: str = MISSING
blank_symbol: str = "<s>"
silence_symbol: Optional[str] = None
def create_units(fst_dir: Path, in_labels: str, vocab: Dictionary) -> Path:
in_units_file = fst_dir / f"kaldi_dict.{in_labels}.txt"
if not in_units_file.exists():
logger.info(f"Creating {in_units_file}")
with open(in_units_file, "w") as f:
print("<eps> 0", file=f)
i = 1
for symb in vocab.symbols[vocab.nspecial :]:
if not symb.startswith("madeupword"):
print(f"{symb} {i}", file=f)
i += 1
return in_units_file
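# Illustrative file contents (added comment, not part of the original code):
# for a vocabulary whose symbols after the special tokens are "a", "b", "|",
# the generated kaldi_dict.<in_labels>.txt would contain
#   <eps> 0
#   a 1
#   b 2
#   | 3
# with any "madeupword*" padding symbols skipped, as done above.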
def create_lexicon(
cfg: KaldiInitializerConfig,
fst_dir: Path,
unique_label: str,
in_units_file: Path,
out_words_file: Path,
) -> (Path, Path):
disambig_in_units_file = fst_dir / f"kaldi_dict.{cfg.in_labels}_disambig.txt"
lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}.txt"
disambig_lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}_disambig.txt"
if (
not lexicon_file.exists()
or not disambig_lexicon_file.exists()
or not disambig_in_units_file.exists()
):
logger.info(f"Creating {lexicon_file} (in units file: {in_units_file})")
assert cfg.wav2letter_lexicon is not None or cfg.in_labels == cfg.out_labels
if cfg.wav2letter_lexicon is not None:
lm_words = set()
with open(out_words_file, "r") as lm_dict_f:
for line in lm_dict_f:
lm_words.add(line.split()[0])
num_skipped = 0
total = 0
with open(cfg.wav2letter_lexicon, "r") as w2l_lex_f, open(
lexicon_file, "w"
) as out_f:
for line in w2l_lex_f:
items = line.rstrip().split("\t")
assert len(items) == 2, items
if items[0] in lm_words:
print(items[0], items[1], file=out_f)
else:
num_skipped += 1
logger.debug(
f"Skipping word {items[0]} as it was not found in LM"
)
total += 1
if num_skipped > 0:
logger.warning(
f"Skipped {num_skipped} out of {total} words as they were not found in LM"
)
else:
with open(in_units_file, "r") as in_f, open(lexicon_file, "w") as out_f:
for line in in_f:
symb = line.split()[0]
if symb != "<eps>" and symb != "<ctc_blank>" and symb != "<SIL>":
print(symb, symb, file=out_f)
lex_disambig_path = (
Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_lex_disambig.pl"
)
res = subprocess.run(
[lex_disambig_path, lexicon_file, disambig_lexicon_file],
check=True,
capture_output=True,
)
ndisambig = int(res.stdout)
disamib_path = Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_disambig.pl"
res = subprocess.run(
[disamib_path, "--include-zero", in_units_file, str(ndisambig)],
check=True,
capture_output=True,
)
with open(disambig_in_units_file, "wb") as f:
f.write(res.stdout)
return disambig_lexicon_file, disambig_in_units_file
def create_G(
kaldi_root: Path, fst_dir: Path, lm_arpa: Path, arpa_base: str
) -> (Path, Path):
out_words_file = fst_dir / f"kaldi_dict.{arpa_base}.txt"
grammar_graph = fst_dir / f"G_{arpa_base}.fst"
if not grammar_graph.exists() or not out_words_file.exists():
logger.info(f"Creating {grammar_graph}")
arpa2fst = kaldi_root / "src/lmbin/arpa2fst"
subprocess.run(
[
arpa2fst,
"--disambig-symbol=#0",
f"--write-symbol-table={out_words_file}",
lm_arpa,
grammar_graph,
],
check=True,
)
return grammar_graph, out_words_file
def create_L(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
lexicon_file: Path,
in_units_file: Path,
out_words_file: Path,
) -> Path:
lexicon_graph = fst_dir / f"L.{unique_label}.fst"
if not lexicon_graph.exists():
logger.info(f"Creating {lexicon_graph} (in units: {in_units_file})")
make_lex = kaldi_root / "egs/wsj/s5/utils/make_lexicon_fst.pl"
fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile"
fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops"
fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort"
def write_disambig_symbol(file):
with open(file, "r") as f:
for line in f:
items = line.rstrip().split()
if items[0] == "#0":
                        out_path = str(file) + "_disambig"
with open(out_path, "w") as out_f:
print(items[1], file=out_f)
return out_path
return None
in_disambig_sym = write_disambig_symbol(in_units_file)
assert in_disambig_sym is not None
out_disambig_sym = write_disambig_symbol(out_words_file)
assert out_disambig_sym is not None
try:
with open(lexicon_graph, "wb") as out_f:
res = subprocess.run(
[make_lex, lexicon_file], capture_output=True, check=True
)
assert len(res.stderr) == 0, res.stderr.decode("utf-8")
res = subprocess.run(
[
fstcompile,
f"--isymbols={in_units_file}",
f"--osymbols={out_words_file}",
"--keep_isymbols=false",
"--keep_osymbols=false",
],
input=res.stdout,
capture_output=True,
)
assert len(res.stderr) == 0, res.stderr.decode("utf-8")
res = subprocess.run(
[fstaddselfloops, in_disambig_sym, out_disambig_sym],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstarcsort, "--sort_type=olabel"],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(lexicon_graph)
raise
except AssertionError:
os.remove(lexicon_graph)
raise
return lexicon_graph
def create_LG(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
lexicon_graph: Path,
grammar_graph: Path,
) -> Path:
lg_graph = fst_dir / f"LG.{unique_label}.fst"
if not lg_graph.exists():
logger.info(f"Creating {lg_graph}")
fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose"
fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar"
fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded"
fstpushspecial = kaldi_root / "src/fstbin/fstpushspecial"
fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort"
try:
with open(lg_graph, "wb") as out_f:
res = subprocess.run(
[fsttablecompose, lexicon_graph, grammar_graph],
capture_output=True,
check=True,
)
res = subprocess.run(
[
fstdeterminizestar,
"--use-log=true",
],
input=res.stdout,
capture_output=True,
)
res = subprocess.run(
[fstminimizeencoded],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstpushspecial],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstarcsort, "--sort_type=ilabel"],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(lg_graph)
raise
return lg_graph
def create_H(
kaldi_root: Path,
fst_dir: Path,
disambig_out_units_file: Path,
in_labels: str,
vocab: Dictionary,
blk_sym: str,
silence_symbol: Optional[str],
) -> (Path, Path, Path):
h_graph = (
fst_dir / f"H.{in_labels}{'_' + silence_symbol if silence_symbol else ''}.fst"
)
h_out_units_file = fst_dir / f"kaldi_dict.h_out.{in_labels}.txt"
disambig_in_units_file_int = Path(str(h_graph) + "isym_disambig.int")
disambig_out_units_file_int = Path(str(disambig_out_units_file) + ".int")
if (
not h_graph.exists()
or not h_out_units_file.exists()
or not disambig_in_units_file_int.exists()
):
logger.info(f"Creating {h_graph}")
eps_sym = "<eps>"
num_disambig = 0
osymbols = []
with open(disambig_out_units_file, "r") as f, open(
disambig_out_units_file_int, "w"
) as out_f:
for line in f:
symb, id = line.rstrip().split()
if line.startswith("#"):
num_disambig += 1
print(id, file=out_f)
else:
if len(osymbols) == 0:
assert symb == eps_sym, symb
osymbols.append((symb, id))
i_idx = 0
isymbols = [(eps_sym, 0)]
imap = {}
for i, s in enumerate(vocab.symbols):
i_idx += 1
isymbols.append((s, i_idx))
imap[s] = i_idx
fst_str = []
node_idx = 0
root_node = node_idx
special_symbols = [blk_sym]
if silence_symbol is not None:
special_symbols.append(silence_symbol)
for ss in special_symbols:
fst_str.append("{} {} {} {}".format(root_node, root_node, ss, eps_sym))
for symbol, _ in osymbols:
if symbol == eps_sym or symbol.startswith("#"):
continue
node_idx += 1
# 1. from root to emitting state
fst_str.append("{} {} {} {}".format(root_node, node_idx, symbol, symbol))
# 2. from emitting state back to root
fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym))
# 3. from emitting state to optional blank state
pre_node = node_idx
node_idx += 1
for ss in special_symbols:
fst_str.append("{} {} {} {}".format(pre_node, node_idx, ss, eps_sym))
# 4. from blank state back to root
fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym))
fst_str.append("{}".format(root_node))
fst_str = "\n".join(fst_str)
h_str = str(h_graph)
isym_file = h_str + ".isym"
with open(isym_file, "w") as f:
for sym, id in isymbols:
f.write("{} {}\n".format(sym, id))
with open(h_out_units_file, "w") as f:
for sym, id in osymbols:
f.write("{} {}\n".format(sym, id))
with open(disambig_in_units_file_int, "w") as f:
disam_sym_id = len(isymbols)
for _ in range(num_disambig):
f.write("{}\n".format(disam_sym_id))
disam_sym_id += 1
fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile"
fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops"
fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort"
try:
with open(h_graph, "wb") as out_f:
res = subprocess.run(
[
fstcompile,
f"--isymbols={isym_file}",
f"--osymbols={h_out_units_file}",
"--keep_isymbols=false",
"--keep_osymbols=false",
],
input=str.encode(fst_str),
capture_output=True,
check=True,
)
res = subprocess.run(
[
fstaddselfloops,
disambig_in_units_file_int,
disambig_out_units_file_int,
],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstarcsort, "--sort_type=olabel"],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(h_graph)
raise
return h_graph, h_out_units_file, disambig_in_units_file_int
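# Topology sketch (hypothetical unit "a", blank "<ctc_blank>", no silence): the
# text FST compiled above contains, per output symbol, arcs of the form
#   0 0 <ctc_blank> <eps>    (blank self-loop at the root)
#   0 1 a a                  (emitting arc for "a")
#   1 0 <eps> <eps>          (back to the root)
#   1 2 <ctc_blank> <eps>    (optional blank after the emission)
#   2 0 <eps> <eps>          (back to the root)
#   0                        (the root is final)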
def create_HLGa(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
h_graph: Path,
lg_graph: Path,
disambig_in_words_file_int: Path,
) -> Path:
hlga_graph = fst_dir / f"HLGa.{unique_label}.fst"
if not hlga_graph.exists():
logger.info(f"Creating {hlga_graph}")
fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose"
fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar"
fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols"
fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal"
fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded"
try:
with open(hlga_graph, "wb") as out_f:
res = subprocess.run(
[
fsttablecompose,
h_graph,
lg_graph,
],
capture_output=True,
check=True,
)
res = subprocess.run(
[fstdeterminizestar, "--use-log=true"],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstrmsymbols, disambig_in_words_file_int],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstrmepslocal],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstminimizeencoded],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(hlga_graph)
raise
return hlga_graph
def create_HLa(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
h_graph: Path,
l_graph: Path,
disambig_in_words_file_int: Path,
) -> Path:
hla_graph = fst_dir / f"HLa.{unique_label}.fst"
if not hla_graph.exists():
logger.info(f"Creating {hla_graph}")
fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose"
fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar"
fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols"
fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal"
fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded"
try:
with open(hla_graph, "wb") as out_f:
res = subprocess.run(
[
fsttablecompose,
h_graph,
l_graph,
],
capture_output=True,
check=True,
)
res = subprocess.run(
[fstdeterminizestar, "--use-log=true"],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstrmsymbols, disambig_in_words_file_int],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstrmepslocal],
input=res.stdout,
capture_output=True,
check=True,
)
res = subprocess.run(
[fstminimizeencoded],
input=res.stdout,
capture_output=True,
check=True,
)
out_f.write(res.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
os.remove(hla_graph)
raise
return hla_graph
def create_HLG(
kaldi_root: Path,
fst_dir: Path,
unique_label: str,
hlga_graph: Path,
prefix: str = "HLG",
) -> Path:
hlg_graph = fst_dir / f"{prefix}.{unique_label}.fst"
if not hlg_graph.exists():
logger.info(f"Creating {hlg_graph}")
add_self_loop = script_dir / "add-self-loop-simple"
kaldi_src = kaldi_root / "src"
kaldi_lib = kaldi_src / "lib"
try:
if not add_self_loop.exists():
fst_include = kaldi_root / "tools/openfst-1.6.7/include"
add_self_loop_src = script_dir / "add-self-loop-simple.cc"
subprocess.run(
[
"c++",
f"-I{kaldi_src}",
f"-I{fst_include}",
f"-L{kaldi_lib}",
add_self_loop_src,
"-lkaldi-base",
"-lkaldi-fstext",
"-o",
add_self_loop,
],
check=True,
)
my_env = os.environ.copy()
            my_env["LD_LIBRARY_PATH"] = f"{kaldi_lib}:{my_env.get('LD_LIBRARY_PATH', '')}"
subprocess.run(
[
add_self_loop,
hlga_graph,
hlg_graph,
],
check=True,
capture_output=True,
env=my_env,
)
except subprocess.CalledProcessError as e:
logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
raise
return hlg_graph
def initalize_kaldi(cfg: KaldiInitializerConfig) -> Path:
if cfg.fst_dir is None:
cfg.fst_dir = osp.join(cfg.data_dir, "kaldi")
if cfg.out_labels is None:
cfg.out_labels = cfg.in_labels
kaldi_root = Path(cfg.kaldi_root)
data_dir = Path(cfg.data_dir)
fst_dir = Path(cfg.fst_dir)
fst_dir.mkdir(parents=True, exist_ok=True)
arpa_base = osp.splitext(osp.basename(cfg.lm_arpa))[0]
unique_label = f"{cfg.in_labels}.{arpa_base}"
with open(data_dir / f"dict.{cfg.in_labels}.txt", "r") as f:
vocab = Dictionary.load(f)
in_units_file = create_units(fst_dir, cfg.in_labels, vocab)
grammar_graph, out_words_file = create_G(
kaldi_root, fst_dir, Path(cfg.lm_arpa), arpa_base
)
disambig_lexicon_file, disambig_L_in_units_file = create_lexicon(
cfg, fst_dir, unique_label, in_units_file, out_words_file
)
h_graph, h_out_units_file, disambig_in_units_file_int = create_H(
kaldi_root,
fst_dir,
disambig_L_in_units_file,
cfg.in_labels,
vocab,
cfg.blank_symbol,
cfg.silence_symbol,
)
lexicon_graph = create_L(
kaldi_root,
fst_dir,
unique_label,
disambig_lexicon_file,
disambig_L_in_units_file,
out_words_file,
)
lg_graph = create_LG(
kaldi_root, fst_dir, unique_label, lexicon_graph, grammar_graph
)
hlga_graph = create_HLGa(
kaldi_root, fst_dir, unique_label, h_graph, lg_graph, disambig_in_units_file_int
)
hlg_graph = create_HLG(kaldi_root, fst_dir, unique_label, hlga_graph)
# for debugging
# hla_graph = create_HLa(kaldi_root, fst_dir, unique_label, h_graph, lexicon_graph, disambig_in_units_file_int)
# hl_graph = create_HLG(kaldi_root, fst_dir, unique_label, hla_graph, prefix="HL_looped")
# create_HLG(kaldi_root, fst_dir, "phnc", h_graph, prefix="H_looped")
return hlg_graph
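# Programmatic usage sketch (hypothetical paths; only fields referenced above
# are shown):
#   cfg = KaldiInitializerConfig(
#       kaldi_root="/opt/kaldi",
#       data_dir="/data/manifests",
#       in_labels="ltr",
#       lm_arpa="/data/lm/4gram.arpa",
#       blank_symbol="<ctc_blank>",
#   )
#   hlg_path = initalize_kaldi(cfg)  # e.g. <data_dir>/kaldi/HLG.ltr.4gram.fst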
@hydra.main(config_path=config_path, config_name="kaldi_initializer")
def cli_main(cfg: KaldiInitializerConfig) -> None:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
initalize_kaldi(cfg)
if __name__ == "__main__":
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "kaldi_initializer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "kaldi_initializer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=KaldiInitializerConfig)
cli_main()
| 23,441 | 32.536481 | 115 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/kaldi/kaldi_decoder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from concurrent.futures import ThreadPoolExecutor
import logging
from omegaconf import MISSING
import os
import torch
from typing import Optional
import warnings
from dataclasses import dataclass
from fairseq.dataclass import FairseqDataclass
from .kaldi_initializer import KaldiInitializerConfig, initalize_kaldi
logger = logging.getLogger(__name__)
@dataclass
class KaldiDecoderConfig(FairseqDataclass):
hlg_graph_path: Optional[str] = None
output_dict: str = MISSING
kaldi_initializer_config: Optional[KaldiInitializerConfig] = None
acoustic_scale: float = 0.5
max_active: int = 10000
beam_delta: float = 0.5
hash_ratio: float = 2.0
is_lattice: bool = False
lattice_beam: float = 10.0
prune_interval: int = 25
determinize_lattice: bool = True
prune_scale: float = 0.1
max_mem: int = 0
phone_determinize: bool = True
word_determinize: bool = True
minimize: bool = True
num_threads: int = 1
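# Construction sketch (hypothetical paths): either point hlg_graph_path at an
# already-compiled decoding graph, or leave it unset and provide a
# kaldi_initializer_config so the graph gets built on first use.
#   cfg = KaldiDecoderConfig(
#       hlg_graph_path="fst/HLG.ltr.4gram.fst",
#       output_dict="fst/kaldi_dict.4gram.txt",
#   )
#   decoder = KaldiDecoder(cfg, beam=15)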
class KaldiDecoder(object):
def __init__(
self,
cfg: KaldiDecoderConfig,
beam: int,
nbest: int = 1,
):
try:
from kaldi.asr import FasterRecognizer, LatticeFasterRecognizer
from kaldi.base import set_verbose_level
from kaldi.decoder import (
FasterDecoder,
FasterDecoderOptions,
LatticeFasterDecoder,
LatticeFasterDecoderOptions,
)
from kaldi.lat.functions import DeterminizeLatticePhonePrunedOptions
from kaldi.fstext import read_fst_kaldi, SymbolTable
        except ImportError:
warnings.warn(
"pykaldi is required for this functionality. Please install from https://github.com/pykaldi/pykaldi"
)
# set_verbose_level(2)
self.acoustic_scale = cfg.acoustic_scale
self.nbest = nbest
if cfg.hlg_graph_path is None:
assert (
cfg.kaldi_initializer_config is not None
), "Must provide hlg graph path or kaldi initializer config"
cfg.hlg_graph_path = initalize_kaldi(cfg.kaldi_initializer_config)
assert os.path.exists(cfg.hlg_graph_path), cfg.hlg_graph_path
if cfg.is_lattice:
self.dec_cls = LatticeFasterDecoder
opt_cls = LatticeFasterDecoderOptions
self.rec_cls = LatticeFasterRecognizer
else:
assert self.nbest == 1, "nbest > 1 requires lattice decoder"
self.dec_cls = FasterDecoder
opt_cls = FasterDecoderOptions
self.rec_cls = FasterRecognizer
self.decoder_options = opt_cls()
self.decoder_options.beam = beam
self.decoder_options.max_active = cfg.max_active
self.decoder_options.beam_delta = cfg.beam_delta
self.decoder_options.hash_ratio = cfg.hash_ratio
if cfg.is_lattice:
self.decoder_options.lattice_beam = cfg.lattice_beam
self.decoder_options.prune_interval = cfg.prune_interval
self.decoder_options.determinize_lattice = cfg.determinize_lattice
self.decoder_options.prune_scale = cfg.prune_scale
det_opts = DeterminizeLatticePhonePrunedOptions()
det_opts.max_mem = cfg.max_mem
det_opts.phone_determinize = cfg.phone_determinize
det_opts.word_determinize = cfg.word_determinize
det_opts.minimize = cfg.minimize
self.decoder_options.det_opts = det_opts
self.output_symbols = {}
with open(cfg.output_dict, "r") as f:
for line in f:
items = line.rstrip().split()
assert len(items) == 2
self.output_symbols[int(items[1])] = items[0]
logger.info(f"Loading FST from {cfg.hlg_graph_path}")
self.fst = read_fst_kaldi(cfg.hlg_graph_path)
self.symbol_table = SymbolTable.read_text(cfg.output_dict)
self.executor = ThreadPoolExecutor(max_workers=cfg.num_threads)
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions, padding = self.get_emissions(models, encoder_input)
return self.decode(emissions, padding)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
model = models[0]
all_encoder_out = [m(**encoder_input) for m in models]
if len(all_encoder_out) > 1:
if "encoder_out" in all_encoder_out[0]:
encoder_out = {
"encoder_out": sum(e["encoder_out"] for e in all_encoder_out)
/ len(all_encoder_out),
"encoder_padding_mask": all_encoder_out[0]["encoder_padding_mask"],
}
padding = encoder_out["encoder_padding_mask"]
else:
encoder_out = {
"logits": sum(e["logits"] for e in all_encoder_out)
/ len(all_encoder_out),
"padding_mask": all_encoder_out[0]["padding_mask"],
}
padding = encoder_out["padding_mask"]
else:
encoder_out = all_encoder_out[0]
padding = (
encoder_out["padding_mask"]
if "padding_mask" in encoder_out
else encoder_out["encoder_padding_mask"]
)
if hasattr(model, "get_logits"):
emissions = model.get_logits(encoder_out, normalize=True)
else:
emissions = model.get_normalized_probs(encoder_out, log_probs=True)
return (
emissions.cpu().float().transpose(0, 1),
padding.cpu() if padding is not None and padding.any() else None,
)
def decode_one(self, logits, padding):
from kaldi.matrix import Matrix
decoder = self.dec_cls(self.fst, self.decoder_options)
asr = self.rec_cls(
decoder, self.symbol_table, acoustic_scale=self.acoustic_scale
)
if padding is not None:
logits = logits[~padding]
mat = Matrix(logits.numpy())
out = asr.decode(mat)
if self.nbest > 1:
from kaldi.fstext import shortestpath
from kaldi.fstext.utils import (
convert_compact_lattice_to_lattice,
convert_lattice_to_std,
convert_nbest_to_list,
get_linear_symbol_sequence,
)
lat = out["lattice"]
sp = shortestpath(lat, nshortest=self.nbest)
sp = convert_compact_lattice_to_lattice(sp)
sp = convert_lattice_to_std(sp)
seq = convert_nbest_to_list(sp)
results = []
for s in seq:
_, o, w = get_linear_symbol_sequence(s)
words = list(self.output_symbols[z] for z in o)
results.append(
{
"tokens": words,
"words": words,
"score": w.value,
"emissions": logits,
}
)
return results
else:
words = out["text"].split()
return [
{
"tokens": words,
"words": words,
"score": out["likelihood"],
"emissions": logits,
}
]
def decode(self, emissions, padding):
if padding is None:
padding = [None] * len(emissions)
ret = list(
map(
lambda e, p: self.executor.submit(self.decode_one, e, p),
emissions,
padding,
)
)
return ret
| 8,265 | 32.738776 | 116 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/kaldi/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/speech_recognition/utils/wer_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from collections import deque
from enum import Enum
import numpy as np
"""
Utility modules for computation of Word Error Rate,
Alignments, as well as more granular metrics like
deletion, insertion and substitutions.
"""
class Code(Enum):
match = 1
substitution = 2
insertion = 3
deletion = 4
class Token(object):
def __init__(self, lbl="", st=np.nan, en=np.nan):
if np.isnan(st):
self.label, self.start, self.end = "", 0.0, 0.0
else:
self.label, self.start, self.end = lbl, st, en
class AlignmentResult(object):
def __init__(self, refs, hyps, codes, score):
self.refs = refs # std::deque<int>
self.hyps = hyps # std::deque<int>
self.codes = codes # std::deque<Code>
self.score = score # float
def coordinate_to_offset(row, col, ncols):
return int(row * ncols + col)
def offset_to_row(offset, ncols):
return int(offset / ncols)
def offset_to_col(offset, ncols):
return int(offset % ncols)
def trimWhitespace(str):
return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str)))
def str2toks(str):
pieces = trimWhitespace(str).split(" ")
toks = []
for p in pieces:
toks.append(Token(p, 0.0, 0.0))
return toks
class EditDistance(object):
def __init__(self, time_mediated):
self.time_mediated_ = time_mediated
self.scores_ = np.nan # Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic>
self.backtraces_ = (
np.nan
) # Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> backtraces_;
self.confusion_pairs_ = {}
def cost(self, ref, hyp, code):
if self.time_mediated_:
if code == Code.match:
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end)
elif code == Code.insertion:
return hyp.end - hyp.start
elif code == Code.deletion:
return ref.end - ref.start
else: # substitution
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1
else:
if code == Code.match:
return 0
elif code == Code.insertion or code == Code.deletion:
return 3
else: # substitution
return 4
def get_result(self, refs, hyps):
res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan)
num_rows, num_cols = self.scores_.shape
res.score = self.scores_[num_rows - 1, num_cols - 1]
curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols)
while curr_offset != 0:
curr_row = offset_to_row(curr_offset, num_cols)
curr_col = offset_to_col(curr_offset, num_cols)
prev_offset = self.backtraces_[curr_row, curr_col]
prev_row = offset_to_row(prev_offset, num_cols)
prev_col = offset_to_col(prev_offset, num_cols)
res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++
res.hyps.appendleft(curr_col - 1)
if curr_row - 1 == prev_row and curr_col == prev_col:
res.codes.appendleft(Code.deletion)
elif curr_row == prev_row and curr_col - 1 == prev_col:
res.codes.appendleft(Code.insertion)
else:
# assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col)
ref_str = refs[res.refs[0]].label
hyp_str = hyps[res.hyps[0]].label
if ref_str == hyp_str:
res.codes.appendleft(Code.match)
else:
res.codes.appendleft(Code.substitution)
confusion_pair = "%s -> %s" % (ref_str, hyp_str)
if confusion_pair not in self.confusion_pairs_:
self.confusion_pairs_[confusion_pair] = 1
else:
self.confusion_pairs_[confusion_pair] += 1
curr_offset = prev_offset
return res
def align(self, refs, hyps):
if len(refs) == 0 and len(hyps) == 0:
return np.nan
# NOTE: we're not resetting the values in these matrices because every value
# will be overridden in the loop below. If this assumption doesn't hold,
# be sure to set all entries in self.scores_ and self.backtraces_ to 0.
self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1))
self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1))
num_rows, num_cols = self.scores_.shape
for i in range(num_rows):
for j in range(num_cols):
if i == 0 and j == 0:
self.scores_[i, j] = 0.0
self.backtraces_[i, j] = 0
continue
if i == 0:
self.scores_[i, j] = self.scores_[i, j - 1] + self.cost(
None, hyps[j - 1], Code.insertion
)
self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols)
continue
if j == 0:
self.scores_[i, j] = self.scores_[i - 1, j] + self.cost(
refs[i - 1], None, Code.deletion
)
self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols)
continue
# Below here both i and j are greater than 0
ref = refs[i - 1]
hyp = hyps[j - 1]
best_score = self.scores_[i - 1, j - 1] + (
self.cost(ref, hyp, Code.match)
if (ref.label == hyp.label)
else self.cost(ref, hyp, Code.substitution)
)
prev_row = i - 1
prev_col = j - 1
ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion)
if ins < best_score:
best_score = ins
prev_row = i
prev_col = j - 1
delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion)
if delt < best_score:
best_score = delt
prev_row = i - 1
prev_col = j
self.scores_[i, j] = best_score
self.backtraces_[i, j] = coordinate_to_offset(
prev_row, prev_col, num_cols
)
return self.get_result(refs, hyps)
class WERTransformer(object):
def __init__(self, hyp_str, ref_str, verbose=True):
self.ed_ = EditDistance(False)
self.id2oracle_errs_ = {}
self.utts_ = 0
self.words_ = 0
self.insertions_ = 0
self.deletions_ = 0
self.substitutions_ = 0
self.process(["dummy_str", hyp_str, ref_str])
if verbose:
print("'%s' vs '%s'" % (hyp_str, ref_str))
self.report_result()
def process(self, input): # std::vector<std::string>&& input
if len(input) < 3:
print(
"Input must be of the form <id> ... <hypo> <ref> , got ",
len(input),
" inputs:",
)
return None
# Align
# std::vector<Token> hyps;
# std::vector<Token> refs;
hyps = str2toks(input[-2])
refs = str2toks(input[-1])
alignment = self.ed_.align(refs, hyps)
if alignment is None:
print("Alignment is null")
return np.nan
# Tally errors
ins = 0
dels = 0
subs = 0
for code in alignment.codes:
if code == Code.substitution:
subs += 1
elif code == Code.insertion:
ins += 1
elif code == Code.deletion:
dels += 1
# Output
row = input
row.append(str(len(refs)))
row.append(str(ins))
row.append(str(dels))
row.append(str(subs))
# print(row)
# Accumulate
kIdIndex = 0
kNBestSep = "/"
pieces = input[kIdIndex].split(kNBestSep)
if len(pieces) == 0:
print(
"Error splitting ",
input[kIdIndex],
" on '",
kNBestSep,
"', got empty list",
)
return np.nan
id = pieces[0]
if id not in self.id2oracle_errs_:
self.utts_ += 1
self.words_ += len(refs)
self.insertions_ += ins
self.deletions_ += dels
self.substitutions_ += subs
self.id2oracle_errs_[id] = [ins, dels, subs]
else:
curr_err = ins + dels + subs
prev_err = np.sum(self.id2oracle_errs_[id])
if curr_err < prev_err:
self.id2oracle_errs_[id] = [ins, dels, subs]
return 0
def report_result(self):
# print("---------- Summary ---------------")
if self.words_ == 0:
print("No words counted")
return
# 1-best
best_wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
print(
"\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, "
"%0.2f%% dels, %0.2f%% subs)"
% (
best_wer,
self.utts_,
self.words_,
100.0 * self.insertions_ / self.words_,
100.0 * self.deletions_ / self.words_,
100.0 * self.substitutions_ / self.words_,
)
)
def wer(self):
if self.words_ == 0:
wer = np.nan
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
return wer
def stats(self):
if self.words_ == 0:
stats = {}
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
stats = dict(
{
"wer": wer,
"utts": self.utts_,
"numwords": self.words_,
"ins": self.insertions_,
"dels": self.deletions_,
"subs": self.substitutions_,
"confusion_pairs": self.ed_.confusion_pairs_,
}
)
return stats
def calc_wer(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.wer()
def calc_wer_stats(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.stats()
def get_wer_alignment_codes(hyp_str, ref_str):
"""
INPUT: hypothesis string, reference string
OUTPUT: List of alignment codes (intermediate results from WER computation)
"""
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes
def merge_counts(x, y):
# Merge two hashes which have 'counts' as their values
# This can be used for example to merge confusion pair counts
# conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs'])
for k, v in y.items():
if k not in x:
x[k] = 0
x[k] += v
return x
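# Minimal usage sketch (hypothetical sentences): exercises the helpers above on
# a single hypothesis/reference pair without touching the module's import-time
# behaviour.
if __name__ == "__main__":
    _hyp = "the cat sat on mat"
    _ref = "the cat sat on the mat"
    print("WER: %.2f%%" % calc_wer(_hyp, _ref))  # one deletion over six words
    print("Stats:", calc_wer_stats(_hyp, _ref))
    print("Codes:", get_wer_alignment_codes(_hyp, _ref))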
| 11,842 | 30.002618 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/data/collaters.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains a collection of classes which implement
collate functionalities for various tasks.
Collaters should know what data to expect for each sample
and they should pack / collate them into batches
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
class Seq2SeqCollater(object):
"""
Implements collate function mainly for seq2seq tasks
This expects each sample to contain feature (src_tokens) and
targets.
This collator is also used for aligned training task.
"""
def __init__(
self,
feature_index=0,
label_index=1,
pad_index=1,
eos_index=2,
move_eos_to_beginning=True,
):
self.feature_index = feature_index
self.label_index = label_index
self.pad_index = pad_index
self.eos_index = eos_index
self.move_eos_to_beginning = move_eos_to_beginning
def _collate_frames(self, frames):
"""Convert a list of 2d frames into a padded 3d tensor
Args:
frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
len_max = max(frame.size(0) for frame in frames)
f_dim = frames[0].size(1)
res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
for i, v in enumerate(frames):
res[i, : v.size(0)] = v
return res
def collate(self, samples):
"""
        Utility function to collate samples into a batch for speech recognition.
"""
if len(samples) == 0:
return {}
# parse samples into torch tensors
parsed_samples = []
for s in samples:
# skip invalid samples
if s["data"][self.feature_index] is None:
continue
source = s["data"][self.feature_index]
if isinstance(source, (np.ndarray, np.generic)):
source = torch.from_numpy(source)
target = s["data"][self.label_index]
if isinstance(target, (np.ndarray, np.generic)):
target = torch.from_numpy(target).long()
elif isinstance(target, list):
target = torch.LongTensor(target)
parsed_sample = {"id": s["id"], "source": source, "target": target}
parsed_samples.append(parsed_sample)
samples = parsed_samples
id = torch.LongTensor([s["id"] for s in samples])
frames = self._collate_frames([s["source"] for s in samples])
# sort samples by descending number of frames
frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
frames_lengths, sort_order = frames_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
frames = frames.index_select(0, sort_order)
target = None
target_lengths = None
prev_output_tokens = None
if samples[0].get("target", None) is not None:
ntokens = sum(len(s["target"]) for s in samples)
target = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, sort_order)
target_lengths = torch.LongTensor(
[s["target"].size(0) for s in samples]
).index_select(0, sort_order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=self.move_eos_to_beginning,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
"target": target,
"target_lengths": target_lengths,
"nsentences": len(samples),
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
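# Minimal usage sketch (hypothetical shapes): collate two variable-length
# feature/target pairs into a padded batch using the collater defined above.
if __name__ == "__main__":
    _collater = Seq2SeqCollater()
    _samples = [
        {"id": 0, "data": [torch.rand(5, 4), torch.LongTensor([3, 4, 2])]},
        {"id": 1, "data": [torch.rand(3, 4), torch.LongTensor([5, 2])]},
    ]
    _batch = _collater.collate(_samples)
    print(_batch["net_input"]["src_tokens"].shape)  # torch.Size([2, 5, 4])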
| 4,796 | 35.340909 | 84 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/data/replabels.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Replabel transforms for use with flashlight's ASG criterion.
"""
def replabel_symbol(i):
"""
Replabel symbols used in flashlight, currently just "1", "2", ...
This prevents training with numeral tokens, so this might change in the future
"""
return str(i)
def pack_replabels(tokens, dictionary, max_reps):
"""
Pack a token sequence so that repeated symbols are replaced by replabels
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_value_to_idx = [0] * (max_reps + 1)
for i in range(1, max_reps + 1):
replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i))
result = []
prev_token = -1
num_reps = 0
for token in tokens:
if token == prev_token and num_reps < max_reps:
num_reps += 1
else:
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
num_reps = 0
result.append(token)
prev_token = token
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
return result
def unpack_replabels(tokens, dictionary, max_reps):
"""
Unpack a token sequence so that replabels are replaced by repeated symbols
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_idx_to_value = {}
for i in range(1, max_reps + 1):
replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i
result = []
prev_token = -1
for token in tokens:
try:
for _ in range(replabel_idx_to_value[token]):
result.append(prev_token)
prev_token = -1
except KeyError:
result.append(token)
prev_token = token
return result
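# Worked example (hypothetical dictionary): with max_reps=2 and a dictionary
# containing replabel_symbol(1) and replabel_symbol(2), the token sequence
#   [7, 7, 7, 9]
# packs to
#   [7, dictionary.index("2"), 9]
# and unpack_replabels inverts the transformation exactly.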
| 1,970 | 26.760563 | 82 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/data/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def calc_mean_invstddev(feature):
if len(feature.size()) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = feature.mean(0)
var = feature.var(0)
# avoid division by ~zero
eps = 1e-8
if (var < eps).any():
return mean, 1.0 / (torch.sqrt(var) + eps)
return mean, 1.0 / torch.sqrt(var)
def apply_mv_norm(features):
    # If there are fewer than 2 spectrogram frames, the variance cannot be computed (is NaN)
# and normalization is not possible, so return the item as it is
if features.size(0) < 2:
return features
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
def lengths_to_encoder_padding_mask(lengths, batch_first=False):
"""
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
Return:
        encoder_padding_mask: a (max_length, B) binary mask, where
            [t, b] = 0 for t < lengths[b] and 1 otherwise
            (shape is (B, max_length) when batch_first=True)
        max_length: maximum length of the B sequences
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
encoder_padding_mask = torch.arange(
max_lengths
).to( # a (T, ) tensor with [0, ..., T-1]
lengths.device
).view( # move to the right device
1, max_lengths
).expand( # reshape to (1, T)-shaped tensor
bsz, -1
) >= lengths.view( # expand to (B, T)-shaped tensor
bsz, 1
).expand(
-1, max_lengths
)
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
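# Example (hypothetical lengths): lengths = torch.LongTensor([1, 2]) yields
# max_length 2 and, with batch_first=True, the mask
#   [[False, True],
#    [False, False]]
# where True marks padding positions; batch_first=False returns its transpose.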
def encoder_padding_mask_to_lengths(
encoder_padding_mask, max_lengths, batch_size, device
):
"""
convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor
    Conventionally, encoder output contains an encoder_padding_mask, which is
    a 2-D mask of shape (T, B), whose (t, b) element indicates whether
encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we
need to convert this mask tensor to a 1-D tensor in shape (B, ), where
[b] denotes the valid length of b-th sequence
Args:
encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,
indicating all are valid
Return:
seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the
number of valid elements of b-th sequence
        max_lengths: maximum length of all sequences; if encoder_padding_mask is
            not None, max_lengths must equal encoder_padding_mask.size(0)
        batch_size: batch size; if encoder_padding_mask is
            not None, batch_size must equal encoder_padding_mask.size(1)
device: which device to put the result on
"""
if encoder_padding_mask is None:
return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)
assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match"
assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match"
return max_lengths - torch.sum(encoder_padding_mask, dim=0)
| 3,429 | 32.960396 | 84 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/data/asr_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from fairseq.data import FairseqDataset
from . import data_utils
from .collaters import Seq2SeqCollater
class AsrDataset(FairseqDataset):
"""
A dataset representing speech and corresponding transcription.
Args:
aud_paths: (List[str]): A list of str with paths to audio files.
aud_durations_ms (List[int]): A list of int containing the durations of
audio files.
tgt (List[torch.LongTensor]): A list of LongTensors containing the indices
of target transcriptions.
tgt_dict (~fairseq.data.Dictionary): target vocabulary.
ids (List[str]): A list of utterance IDs.
speakers (List[str]): A list of speakers corresponding to utterances.
num_mel_bins (int): Number of triangular mel-frequency bins (default: 80)
frame_length (float): Frame length in milliseconds (default: 25.0)
frame_shift (float): Frame shift in milliseconds (default: 10.0)
"""
def __init__(
self,
aud_paths,
aud_durations_ms,
tgt,
tgt_dict,
ids,
speakers,
num_mel_bins=80,
frame_length=25.0,
frame_shift=10.0,
):
assert frame_length > 0
assert frame_shift > 0
assert all(x > frame_length for x in aud_durations_ms)
self.frame_sizes = [
int(1 + (d - frame_length) / frame_shift) for d in aud_durations_ms
]
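        # e.g. (hypothetical values) a 170 ms clip with 25.0 ms frames and a
        # 10.0 ms shift gives int(1 + (170 - 25.0) / 10.0) = 15 frames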
assert len(aud_paths) > 0
assert len(aud_paths) == len(aud_durations_ms)
assert len(aud_paths) == len(tgt)
assert len(aud_paths) == len(ids)
assert len(aud_paths) == len(speakers)
self.aud_paths = aud_paths
self.tgt_dict = tgt_dict
self.tgt = tgt
self.ids = ids
self.speakers = speakers
self.num_mel_bins = num_mel_bins
self.frame_length = frame_length
self.frame_shift = frame_shift
self.s2s_collater = Seq2SeqCollater(
0,
1,
pad_index=self.tgt_dict.pad(),
eos_index=self.tgt_dict.eos(),
move_eos_to_beginning=True,
)
def __getitem__(self, index):
import torchaudio
import torchaudio.compliance.kaldi as kaldi
tgt_item = self.tgt[index] if self.tgt is not None else None
path = self.aud_paths[index]
if not os.path.exists(path):
raise FileNotFoundError("Audio file not found: {}".format(path))
sound, sample_rate = torchaudio.load_wav(path)
output = kaldi.fbank(
sound,
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
frame_shift=self.frame_shift,
)
output_cmvn = data_utils.apply_mv_norm(output)
return {"id": index, "data": [output_cmvn.detach(), tgt_item]}
def __len__(self):
return len(self.aud_paths)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[int]): sample indices to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
return self.s2s_collater.collate(samples)
def num_tokens(self, index):
return self.frame_sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.frame_sizes[index],
len(self.tgt[index]) if self.tgt is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self))
| 3,955 | 31.162602 | 82 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/data/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .asr_dataset import AsrDataset
__all__ = [
"AsrDataset",
]
| 248 | 19.75 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/tasks/speech_recognition.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import re
import sys
import torch
from examples.speech_recognition.data import AsrDataset
from examples.speech_recognition.data.replabels import replabel_symbol
from fairseq.data import Dictionary
from fairseq.tasks import LegacyFairseqTask, register_task
def get_asr_dataset_from_json(data_json_path, tgt_dict):
"""
Parse data json and create dataset.
    See scripts/asr_prep_json.py which packs json from raw files
Json example:
{
"utts": {
"4771-29403-0025": {
"input": {
"length_ms": 170,
"path": "/tmp/file1.flac"
},
"output": {
"text": "HELLO \n",
"token": "HE LLO",
"tokenid": "4815, 861"
}
},
"1564-142299-0096": {
...
}
}
"""
if not os.path.isfile(data_json_path):
raise FileNotFoundError("Dataset not found: {}".format(data_json_path))
with open(data_json_path, "rb") as f:
data_samples = json.load(f)["utts"]
assert len(data_samples) != 0
sorted_samples = sorted(
data_samples.items(),
key=lambda sample: int(sample[1]["input"]["length_ms"]),
reverse=True,
)
aud_paths = [s[1]["input"]["path"] for s in sorted_samples]
ids = [s[0] for s in sorted_samples]
speakers = []
for s in sorted_samples:
m = re.search("(.+?)-(.+?)-(.+?)", s[0])
speakers.append(m.group(1) + "_" + m.group(2))
frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples]
tgt = [
[int(i) for i in s[1]["output"]["tokenid"].split(", ")]
for s in sorted_samples
]
# append eos
tgt = [[*t, tgt_dict.eos()] for t in tgt]
return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers)
@register_task("speech_recognition")
class SpeechRecognitionTask(LegacyFairseqTask):
"""
Task for training speech recognition model.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="path to data directory")
parser.add_argument(
"--silence-token", default="\u2581", help="token for silence (used by w2l)"
)
parser.add_argument(
"--max-source-positions",
default=sys.maxsize,
type=int,
metavar="N",
help="max number of frames in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries)."""
dict_path = os.path.join(args.data, "dict.txt")
if not os.path.isfile(dict_path):
raise FileNotFoundError("Dict not found: {}".format(dict_path))
tgt_dict = Dictionary.load(dict_path)
if args.criterion == "ctc_loss":
tgt_dict.add_symbol("<ctc_blank>")
elif args.criterion == "asg_loss":
for i in range(1, args.max_replabel + 1):
tgt_dict.add_symbol(replabel_symbol(i))
print("| dictionary: {} types".format(len(tgt_dict)))
return cls(args, tgt_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
data_json_path = os.path.join(self.args.data, "{}.json".format(split))
self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)
def build_generator(self, models, args, **unused):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, self.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, self.target_dictionary)
elif w2l_decoder == "fairseqlm":
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(args, self.target_dictionary)
else:
return super().build_generator(models, args)
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.tgt_dict
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
return None
def max_positions(self):
"""Return the max speech and sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
| 5,397 | 33.164557 | 87 |
py
|
sign-topic
|
sign-topic-main/examples/speech_recognition/tasks/__init__.py
|
import importlib
import os
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
task_name = file[: file.find(".py")]
importlib.import_module("examples.speech_recognition.tasks." + task_name)
| 273 | 29.444444 | 81 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/generate_waveform.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import logging
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import soundfile as sf
import sys
import torch
import torchaudio
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.logging import progress_bar
from fairseq.tasks.text_to_speech import plot_tts_output
from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDataset
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def make_parser():
parser = options.get_speech_generation_parser()
parser.add_argument("--dump-features", action="store_true")
parser.add_argument("--dump-waveforms", action="store_true")
parser.add_argument("--dump-attentions", action="store_true")
parser.add_argument("--dump-eos-probs", action="store_true")
parser.add_argument("--dump-plots", action="store_true")
parser.add_argument("--dump-target", action="store_true")
parser.add_argument("--output-sample-rate", default=22050, type=int)
parser.add_argument("--teacher-forcing", action="store_true")
parser.add_argument(
"--audio-format", type=str, default="wav", choices=["wav", "flac"]
)
return parser
def postprocess_results(
dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target
):
def to_np(x):
return None if x is None else x.detach().cpu().numpy()
sample_ids = [dataset.ids[i] for i in sample["id"].tolist()]
texts = sample["src_texts"] if "src_texts" in sample else [""] * len(hypos)
attns = [to_np(hypo["attn"]) for hypo in hypos]
eos_probs = [to_np(hypo.get("eos_prob", None)) for hypo in hypos]
feat_preds = [to_np(hypo["feature"]) for hypo in hypos]
wave_preds = [to_np(resample_fn(h["waveform"])) for h in hypos]
if dump_target:
feat_targs = [to_np(hypo["targ_feature"]) for hypo in hypos]
wave_targs = [to_np(resample_fn(h["targ_waveform"])) for h in hypos]
else:
feat_targs = [None for _ in hypos]
wave_targs = [None for _ in hypos]
return zip(sample_ids, texts, attns, eos_probs, feat_preds, wave_preds,
feat_targs, wave_targs)
def dump_result(
is_na_model,
args,
vocoder,
sample_id,
text,
attn,
eos_prob,
feat_pred,
wave_pred,
feat_targ,
wave_targ,
):
sample_rate = args.output_sample_rate
out_root = Path(args.results_path)
if args.dump_features:
feat_dir = out_root / "feat"
feat_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_dir / f"{sample_id}.npy", feat_pred)
if args.dump_target:
feat_tgt_dir = out_root / "feat_tgt"
feat_tgt_dir.mkdir(exist_ok=True, parents=True)
np.save(feat_tgt_dir / f"{sample_id}.npy", feat_targ)
if args.dump_attentions:
attn_dir = out_root / "attn"
attn_dir.mkdir(exist_ok=True, parents=True)
np.save(attn_dir / f"{sample_id}.npy", attn.numpy())
if args.dump_eos_probs and not is_na_model:
eos_dir = out_root / "eos"
eos_dir.mkdir(exist_ok=True, parents=True)
np.save(eos_dir / f"{sample_id}.npy", eos_prob)
if args.dump_plots:
images = [feat_pred.T] if is_na_model else [feat_pred.T, attn]
names = ["output"] if is_na_model else ["output", "alignment"]
if feat_targ is not None:
images = [feat_targ.T] + images
names = [f"target (idx={sample_id})"] + names
if is_na_model:
plot_tts_output(images, names, attn, "alignment", suptitle=text)
else:
plot_tts_output(images, names, eos_prob, "eos prob", suptitle=text)
plot_dir = out_root / "plot"
plot_dir.mkdir(exist_ok=True, parents=True)
plt.savefig(plot_dir / f"{sample_id}.png")
plt.close()
if args.dump_waveforms:
ext = args.audio_format
if wave_pred is not None:
wav_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}"
wav_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_dir / f"{sample_id}.{ext}", wave_pred, sample_rate)
if args.dump_target and wave_targ is not None:
wav_tgt_dir = out_root / f"{ext}_{sample_rate}hz_{vocoder}_tgt"
wav_tgt_dir.mkdir(exist_ok=True, parents=True)
sf.write(wav_tgt_dir / f"{sample_id}.{ext}", wave_targ, sample_rate)
def main(args):
assert(args.dump_features or args.dump_waveforms or args.dump_attentions
or args.dump_eos_probs or args.dump_plots)
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 8000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
task = tasks.setup_task(args)
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
task=task,
arg_overrides=ast.literal_eval(args.model_overrides),
)
model = models[0].cuda() if use_cuda else models[0]
# use the original n_frames_per_step
task.args.n_frames_per_step = saved_cfg.task.n_frames_per_step
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
data_cfg = task.data_cfg
sample_rate = data_cfg.config.get("features", {}).get("sample_rate", 22050)
resample_fn = {
False: lambda x: x,
True: lambda x: torchaudio.sox_effects.apply_effects_tensor(
x.detach().cpu().unsqueeze(0), sample_rate,
[['rate', str(args.output_sample_rate)]]
)[0].squeeze(0)
}.get(args.output_sample_rate != sample_rate)
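    # resample_fn is the identity when the output rate already matches the
    # feature sample rate; otherwise waveforms are resampled via sox effects.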
if args.output_sample_rate != sample_rate:
logger.info(f"resampling to {args.output_sample_rate}Hz")
generator = task.build_generator([model], args)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
Path(args.results_path).mkdir(exist_ok=True, parents=True)
is_na_model = getattr(model, "NON_AUTOREGRESSIVE", False)
dataset = task.dataset(args.gen_subset)
vocoder = task.args.vocoder
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
hypos = generator.generate(model, sample, has_targ=args.dump_target)
for result in postprocess_results(
dataset, sample, hypos, resample_fn, args.dump_target
):
dump_result(is_na_model, args, vocoder, *result)
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| 7,339 | 37.031088 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from scipy.interpolate import interp1d
import torchaudio
from fairseq.tasks.text_to_speech import (
batch_compute_distortion, compute_rms_dist
)
def batch_mel_spectral_distortion(
y1, y2, sr, normalize_type="path", mel_fn=None
):
"""
https://arxiv.org/pdf/2011.03568.pdf
Same as Mel Cepstral Distortion, but computed on log-mel spectrograms.
"""
if mel_fn is None or mel_fn.sample_rate != sr:
mel_fn = torchaudio.transforms.MelSpectrogram(
sr, n_fft=int(0.05 * sr), win_length=int(0.05 * sr),
hop_length=int(0.0125 * sr), f_min=20, n_mels=80,
window_fn=torch.hann_window
).to(y1[0].device)
offset = 1e-6
return batch_compute_distortion(
y1, y2, sr, lambda y: torch.log(mel_fn(y) + offset).transpose(-1, -2),
compute_rms_dist, normalize_type
)
# This code is based on
# "https://github.com/bastibe/MAPS-Scripts/blob/master/helper.py"
def _same_t_in_true_and_est(func):
def new_func(true_t, true_f, est_t, est_f):
assert type(true_t) is np.ndarray
assert type(true_f) is np.ndarray
assert type(est_t) is np.ndarray
assert type(est_f) is np.ndarray
interpolated_f = interp1d(
est_t, est_f, bounds_error=False, kind='nearest', fill_value=0
)(true_t)
return func(true_t, true_f, true_t, interpolated_f)
return new_func
@_same_t_in_true_and_est
def gross_pitch_error(true_t, true_f, est_t, est_f):
"""The relative frequency in percent of pitch estimates that are
outside a threshold around the true pitch. Only frames that are
considered pitched by both the ground truth and the estimator (if
applicable) are considered.
"""
correct_frames = _true_voiced_frames(true_t, true_f, est_t, est_f)
gross_pitch_error_frames = _gross_pitch_error_frames(
true_t, true_f, est_t, est_f
)
return np.sum(gross_pitch_error_frames) / np.sum(correct_frames)
def _gross_pitch_error_frames(true_t, true_f, est_t, est_f, eps=1e-8):
voiced_frames = _true_voiced_frames(true_t, true_f, est_t, est_f)
true_f_p_eps = [x + eps for x in true_f]
pitch_error_frames = np.abs(est_f / true_f_p_eps - 1) > 0.2
return voiced_frames & pitch_error_frames
def _true_voiced_frames(true_t, true_f, est_t, est_f):
return (est_f != 0) & (true_f != 0)
def _voicing_decision_error_frames(true_t, true_f, est_t, est_f):
return (est_f != 0) != (true_f != 0)
@_same_t_in_true_and_est
def f0_frame_error(true_t, true_f, est_t, est_f):
gross_pitch_error_frames = _gross_pitch_error_frames(
true_t, true_f, est_t, est_f
)
voicing_decision_error_frames = _voicing_decision_error_frames(
true_t, true_f, est_t, est_f
)
return (np.sum(gross_pitch_error_frames) +
np.sum(voicing_decision_error_frames)) / (len(true_t))
@_same_t_in_true_and_est
def voicing_decision_error(true_t, true_f, est_t, est_f):
voicing_decision_error_frames = _voicing_decision_error_frames(
true_t, true_f, est_t, est_f
)
return np.sum(voicing_decision_error_frames) / (len(true_t))
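# Worked example (hypothetical contours as numpy arrays on a shared time axis):
# with true_f = [100, 0, 100] and est_f = [130, 0, 100], two frames are voiced
# in both, frame 0 is off by 30% (> 20%), so gross_pitch_error = 1/2 while
# voicing_decision_error = 0 and f0_frame_error = 1/3.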
| 3,357 | 31.921569 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/data_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
from pathlib import Path
from typing import Optional, List, Dict
import zipfile
import tempfile
from dataclasses import dataclass
from itertools import groupby
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_tsv_to_dicts
from fairseq.data.audio.audio_utils import (
TTSSpectrogram, TTSMelScale, parse_path, read_from_stored_zip, is_npy_data
)
def trim_or_pad_to_target_length(
data_1d_or_2d: np.ndarray, target_length: int
) -> np.ndarray:
assert len(data_1d_or_2d.shape) in {1, 2}
delta = data_1d_or_2d.shape[0] - target_length
if delta >= 0: # trim if being longer
data_1d_or_2d = data_1d_or_2d[: target_length]
else: # pad if being shorter
if len(data_1d_or_2d.shape) == 1:
data_1d_or_2d = np.concatenate(
[data_1d_or_2d, np.zeros(-delta)], axis=0
)
else:
data_1d_or_2d = np.concatenate(
[data_1d_or_2d, np.zeros((-delta, data_1d_or_2d.shape[1]))],
axis=0
)
return data_1d_or_2d
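# e.g. (hypothetical shapes) a (5,) pitch contour trimmed to target_length=3
# keeps its first 3 values, while a (3, 80) spectrogram padded to 5 frames
# gains two all-zero rows.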
def extract_logmel_spectrogram(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, win_length: int = 1024,
hop_length: int = 256, n_fft: int = 1024,
win_fn: callable = torch.hann_window, n_mels: int = 80,
f_min: float = 0., f_max: float = 8000, eps: float = 1e-5,
overwrite: bool = False, target_length: Optional[int] = None
):
if output_path is not None and output_path.is_file() and not overwrite:
return
spectrogram_transform = TTSSpectrogram(
n_fft=n_fft, win_length=win_length, hop_length=hop_length,
window_fn=win_fn
)
mel_scale_transform = TTSMelScale(
n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max,
n_stft=n_fft // 2 + 1
)
spectrogram = spectrogram_transform(waveform)
mel_spec = mel_scale_transform(spectrogram)
logmel_spec = torch.clamp(mel_spec, min=eps).log()
assert len(logmel_spec.shape) == 3 and logmel_spec.shape[0] == 1
logmel_spec = logmel_spec.squeeze().t() # D x T -> T x D
if target_length is not None:
logmel_spec = trim_or_pad_to_target_length(logmel_spec, target_length)
if output_path is not None:
np.save(output_path.as_posix(), logmel_spec)
else:
return logmel_spec
def extract_pitch(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, hop_length: int = 256,
log_scale: bool = True, phoneme_durations: Optional[List[int]] = None
):
if output_path is not None and output_path.is_file():
return
try:
import pyworld
except ImportError:
raise ImportError("Please install PyWORLD: pip install pyworld")
_waveform = waveform.squeeze(0).double().numpy()
pitch, t = pyworld.dio(
_waveform, sample_rate, frame_period=hop_length / sample_rate * 1000
)
pitch = pyworld.stonemask(_waveform, pitch, t, sample_rate)
if phoneme_durations is not None:
pitch = trim_or_pad_to_target_length(pitch, sum(phoneme_durations))
try:
from scipy.interpolate import interp1d
except ImportError:
raise ImportError("Please install SciPy: pip install scipy")
nonzero_ids = np.where(pitch != 0)[0]
if len(nonzero_ids) == 0:
print((f"{output_path} has all empty values in the pitch contour"))
return
elif len(nonzero_ids) == 1:
print((f"{output_path} has only one non-zero values in the pitch contour"))
return
else:
interp_fn = interp1d(
nonzero_ids,
pitch[nonzero_ids],
fill_value=(pitch[nonzero_ids[0]], pitch[nonzero_ids[-1]]),
bounds_error=False,
)
pitch = interp_fn(np.arange(0, len(pitch)))
d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations]))
pitch = np.array(
[
np.mean(pitch[d_cumsum[i-1]: d_cumsum[i]])
for i in range(1, len(d_cumsum))
]
)
assert len(pitch) == len(phoneme_durations)
if log_scale:
pitch = np.log(pitch + 1)
if output_path is not None:
np.save(output_path.as_posix(), pitch)
else:
return pitch
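# Illustrative usage sketch (hypothetical waveform; with output_path=None and no
# phoneme durations, the raw per-frame log-pitch contour is returned):
#   >>> waveform, sr = torchaudio.load("sample.wav")
#   >>> pitch = extract_pitch(waveform, sr)  # 1D np.ndarray, one value per hop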
def extract_energy(
waveform: torch.Tensor, output_path: Optional[Path] = None,
hop_length: int = 256, n_fft: int = 1024, log_scale: bool = True,
phoneme_durations: Optional[List[int]] = None
):
if output_path is not None and output_path.is_file():
return
assert len(waveform.shape) == 2 and waveform.shape[0] == 1
waveform = waveform.view(1, 1, waveform.shape[1])
waveform = F.pad(
waveform.unsqueeze(1), [n_fft // 2, n_fft // 2, 0, 0],
mode="reflect"
)
waveform = waveform.squeeze(1)
fourier_basis = np.fft.fft(np.eye(n_fft))
cutoff = int((n_fft / 2 + 1))
fourier_basis = np.vstack(
[np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])]
)
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
forward_transform = F.conv1d(
waveform, forward_basis, stride=hop_length, padding=0
)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
energy = torch.norm(magnitude, dim=1).squeeze(0).numpy()
if phoneme_durations is not None:
energy = trim_or_pad_to_target_length(energy, sum(phoneme_durations))
d_cumsum = np.cumsum(np.concatenate([np.array([0]), phoneme_durations]))
energy = np.array(
[
np.mean(energy[d_cumsum[i - 1]: d_cumsum[i]])
for i in range(1, len(d_cumsum))
]
)
assert len(energy) == len(phoneme_durations)
if log_scale:
energy = np.log(energy + 1)
if output_path is not None:
np.save(output_path.as_posix(), energy)
else:
return energy
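# Illustrative usage sketch (hypothetical mono waveform of shape (1, T)):
#   >>> energy = extract_energy(waveform)  # 1D np.ndarray of per-frame L2 norms
# When phoneme_durations is given, values are averaged per phoneme instead.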
def get_global_cmvn(feature_root: Path, output_path: Optional[Path] = None):
mean_x, mean_x2, n_frames = None, None, 0
feature_paths = feature_root.glob("*.npy")
for p in tqdm(feature_paths):
with open(p, 'rb') as f:
frames = np.load(f).squeeze()
n_frames += frames.shape[0]
cur_mean_x = frames.sum(axis=0)
if mean_x is None:
mean_x = cur_mean_x
else:
mean_x += cur_mean_x
cur_mean_x2 = (frames ** 2).sum(axis=0)
if mean_x2 is None:
mean_x2 = cur_mean_x2
else:
mean_x2 += cur_mean_x2
mean_x /= n_frames
mean_x2 /= n_frames
var_x = mean_x2 - mean_x ** 2
std_x = np.sqrt(np.maximum(var_x, 1e-10))
if output_path is not None:
with open(output_path, 'wb') as f:
np.savez(f, mean=mean_x, std=std_x)
else:
return {"mean": mean_x, "std": std_x}
def ipa_phonemize(text, lang="en-us", use_g2p=False):
if use_g2p:
assert lang == "en-us", "g2pE phonemizer only works for en-us"
try:
from g2p_en import G2p
g2p = G2p()
return " ".join("|" if p == " " else p for p in g2p(text))
except ImportError:
raise ImportError(
"Please install phonemizer: pip install g2p_en"
)
else:
try:
from phonemizer import phonemize
from phonemizer.separator import Separator
return phonemize(
text, backend='espeak', language=lang,
separator=Separator(word="| ", phone=" ")
)
except ImportError:
raise ImportError(
"Please install phonemizer: pip install phonemizer"
)
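# Illustrative usage sketch (requires phonemizer or g2p_en to be installed):
#   >>> ipa_phonemize("hello world", lang="en-us")  # espeak IPA, words joined by "|"
#   >>> ipa_phonemize("hello world", use_g2p=True)  # ARPAbet phones via g2p_en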
@dataclass
class ForceAlignmentInfo(object):
tokens: List[str]
frame_durations: List[int]
start_sec: Optional[float]
end_sec: Optional[float]
def get_mfa_alignment_by_sample_id(
textgrid_zip_path: str, sample_id: str, sample_rate: int,
hop_length: int, silence_phones: List[str] = ("sil", "sp", "spn")
) -> ForceAlignmentInfo:
try:
import tgt
except ImportError:
raise ImportError("Please install TextGridTools: pip install tgt")
filename = f"{sample_id}.TextGrid"
out_root = Path(tempfile.gettempdir())
tgt_path = out_root / filename
with zipfile.ZipFile(textgrid_zip_path) as f_zip:
f_zip.extract(filename, path=out_root)
textgrid = tgt.io.read_textgrid(tgt_path.as_posix())
os.remove(tgt_path)
phones, frame_durations = [], []
start_sec, end_sec, end_idx = 0, 0, 0
for t in textgrid.get_tier_by_name("phones")._objects:
s, e, p = t.start_time, t.end_time, t.text
# Trim leading silences
if len(phones) == 0:
if p in silence_phones:
continue
else:
start_sec = s
phones.append(p)
if p not in silence_phones:
end_sec = e
end_idx = len(phones)
r = sample_rate / hop_length
frame_durations.append(int(np.round(e * r) - np.round(s * r)))
# Trim tailing silences
phones = phones[:end_idx]
frame_durations = frame_durations[:end_idx]
return ForceAlignmentInfo(
tokens=phones, frame_durations=frame_durations, start_sec=start_sec,
end_sec=end_sec
)
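# Worked example of the frame-duration conversion above (made-up numbers): with
# sample_rate=22050 and hop_length=256, r is about 86.13 frames/s, so a phone
# spanning 0.10 s to 0.25 s maps to round(0.25 * r) - round(0.10 * r) = 22 - 9
# = 13 frames.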
def get_mfa_alignment(
textgrid_zip_path: str, sample_ids: List[str], sample_rate: int,
hop_length: int
) -> Dict[str, ForceAlignmentInfo]:
return {
i: get_mfa_alignment_by_sample_id(
textgrid_zip_path, i, sample_rate, hop_length
) for i in tqdm(sample_ids)
}
def get_unit_alignment(
id_to_unit_tsv_path: str, sample_ids: List[str]
) -> Dict[str, ForceAlignmentInfo]:
id_to_units = {
e["id"]: e["units"] for e in load_tsv_to_dicts(id_to_unit_tsv_path)
}
id_to_units = {i: id_to_units[i].split() for i in sample_ids}
id_to_units_collapsed = {
i: [uu for uu, _ in groupby(u)] for i, u in id_to_units.items()
}
id_to_durations = {
i: [len(list(g)) for _, g in groupby(u)] for i, u in id_to_units.items()
}
return {
i: ForceAlignmentInfo(
tokens=id_to_units_collapsed[i], frame_durations=id_to_durations[i],
start_sec=None, end_sec=None
)
for i in sample_ids
}
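# Illustrative example of the collapsing above (hypothetical unit string):
#   units "12 12 7 7 7 3" -> tokens ["12", "7", "3"], frame_durations [2, 3, 1]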
def get_feature_value_min_max(feature_paths: List[str]):
v_min, v_max = 1e-8, -1e-8
for p in tqdm(feature_paths):
_path, slice_ptr = parse_path(p)
assert len(slice_ptr) == 2
byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
assert is_npy_data(byte_data)
path_or_fp = io.BytesIO(byte_data)
features = np.load(path_or_fp).squeeze()
v_min = min(v_min, features.min().item())
v_max = max(v_max, features.max().item())
return v_min, v_max
| 11,364 | 31.942029 | 87 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 177 | 34.6 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/evaluation/eval_sp.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Signal processing-based evaluation using waveforms
"""
import csv
import numpy as np
import os.path as op
import torch
import tqdm
from tabulate import tabulate
import torchaudio
from examples.speech_synthesis.utils import batch_mel_spectral_distortion
from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
def load_eval_spec(path):
with open(path) as f:
reader = csv.DictReader(f, delimiter='\t')
samples = list(reader)
return samples
def eval_distortion(samples, distortion_fn, device="cuda"):
nmiss = 0
results = []
for sample in tqdm.tqdm(samples):
if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
nmiss += 1
results.append(None)
continue
# assume single channel
yref, sr = torchaudio.load(sample["ref"])
ysyn, _sr = torchaudio.load(sample["syn"])
yref, ysyn = yref[0].to(device), ysyn[0].to(device)
assert sr == _sr, f"{sr} != {_sr}"
distortion, extra = distortion_fn([yref], [ysyn], sr, None)[0]
_, _, _, _, _, pathmap = extra
nins = torch.sum(pathmap.sum(dim=1) - 1) # extra frames in syn
ndel = torch.sum(pathmap.sum(dim=0) - 1) # missing frames from syn
results.append(
(distortion.item(), # path distortion
pathmap.size(0), # yref num frames
pathmap.size(1), # ysyn num frames
pathmap.sum().item(), # path length
nins.item(), # insertion
ndel.item(), # deletion
)
)
return results
def eval_mel_cepstral_distortion(samples, device="cuda"):
return eval_distortion(samples, batch_mel_cepstral_distortion, device)
def eval_mel_spectral_distortion(samples, device="cuda"):
return eval_distortion(samples, batch_mel_spectral_distortion, device)
def print_results(results, show_bin):
results = np.array(list(filter(lambda x: x is not None, results)))
np.set_printoptions(precision=3)
def _print_result(results):
dist, dur_ref, dur_syn, dur_ali, nins, ndel = results.sum(axis=0)
res = {
"nutt": len(results),
"dist": dist,
"dur_ref": int(dur_ref),
"dur_syn": int(dur_syn),
"dur_ali": int(dur_ali),
"dist_per_ref_frm": dist/dur_ref,
"dist_per_syn_frm": dist/dur_syn,
"dist_per_ali_frm": dist/dur_ali,
"ins": nins/dur_ref,
"del": ndel/dur_ref,
}
print(tabulate(
[res.values()],
res.keys(),
floatfmt=".4f"
))
print(">>>> ALL")
_print_result(results)
if show_bin:
edges = [0, 200, 400, 600, 800, 1000, 2000, 4000]
for i in range(1, len(edges)):
mask = np.logical_and(results[:, 1] >= edges[i-1],
results[:, 1] < edges[i])
if not mask.any():
continue
bin_results = results[mask]
print(f">>>> ({edges[i-1]}, {edges[i]})")
_print_result(bin_results)
def main(eval_spec, mcd, msd, show_bin):
samples = load_eval_spec(eval_spec)
device = "cpu"
if mcd:
print("===== Evaluate Mean Cepstral Distortion =====")
results = eval_mel_cepstral_distortion(samples, device)
print_results(results, show_bin)
if msd:
print("===== Evaluate Mean Spectral Distortion =====")
results = eval_mel_spectral_distortion(samples, device)
print_results(results, show_bin)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("eval_spec")
parser.add_argument("--mcd", action="store_true")
parser.add_argument("--msd", action="store_true")
parser.add_argument("--show-bin", action="store_true")
args = parser.parse_args()
main(args.eval_spec, args.mcd, args.msd, args.show_bin)
| 4,149 | 30.439394 | 75 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/evaluation/get_eval_manifest.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
from pathlib import Path
def main(args):
"""
`uid syn ref text`
"""
in_root = Path(args.generation_root).resolve()
ext = args.audio_format
with open(args.audio_manifest) as f, open(args.output_path, "w") as f_out:
reader = csv.DictReader(
f, delimiter="\t", quotechar=None, doublequote=False,
lineterminator="\n", quoting=csv.QUOTE_NONE
)
header = ["id", "syn", "ref", "text", "speaker"]
f_out.write("\t".join(header) + "\n")
for row in reader:
dir_name = f"{ext}_{args.sample_rate}hz_{args.vocoder}"
id_ = row["id"]
syn = (in_root / dir_name / f"{id_}.{ext}").as_posix()
ref = row["audio"]
if args.use_resynthesized_target:
ref = (in_root / f"{dir_name}_tgt" / f"{id_}.{ext}").as_posix()
if args.eval_target:
syn = row["audio"]
sample = [id_, syn, ref, row["tgt_text"], row["speaker"]]
f_out.write("\t".join(sample) + "\n")
print(f"wrote evaluation file to {args.output_path}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--generation-root", help="output directory for generate_waveform.py"
)
parser.add_argument(
"--audio-manifest",
help="used to determine the original utterance ID and text"
)
parser.add_argument(
"--output-path", help="path to output evaluation spec file"
)
parser.add_argument(
"--use-resynthesized-target", action="store_true",
help="use resynthesized reference instead of the original audio"
)
parser.add_argument(
"--eval-target", action="store_true",
help="evaluate reference instead of model prediction"
)
parser.add_argument("--vocoder", type=str, default="griffin_lim")
parser.add_argument("--sample-rate", type=int, default=22_050)
parser.add_argument("--audio-format", type=str, default="wav")
args = parser.parse_args()
main(args)
| 2,268 | 33.907692 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/evaluation/eval_asr.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import editdistance
import re
import shutil
import soundfile as sf
import subprocess
from pathlib import Path
from examples.speech_to_text.data_utils import load_tsv_to_dicts
def preprocess_text(text):
text = "|".join(re.sub(r"[^A-Z' ]", " ", text.upper()).split())
text = " ".join(text)
return text
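# Worked example of the normalization above:
#   >>> preprocess_text("Hello, world!")
#   'H E L L O | W O R L D'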
def prepare_w2v_data(
dict_dir, sample_rate, label, audio_paths, texts, split, data_dir
):
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(
dict_dir / f"dict.{label}.txt",
data_dir / f"dict.{label}.txt"
)
with open(data_dir / f"{split}.tsv", "w") as f:
f.write("/\n")
for audio_path in audio_paths:
wav, sr = sf.read(audio_path)
            assert sr == sample_rate, f"{sr} != {sample_rate}"
nsample = len(wav)
f.write(f"{audio_path}\t{nsample}\n")
with open(data_dir / f"{split}.{label}", "w") as f:
for text in texts:
text = preprocess_text(text)
f.write(f"{text}\n")
def run_asr(asr_dir, split, w2v_ckpt, w2v_label, res_dir):
"""
results will be saved at
{res_dir}/{ref,hypo}.word-{w2v_ckpt.filename}-{split}.txt
"""
cmd = ["python", "-m", "examples.speech_recognition.infer"]
cmd += [str(asr_dir.resolve())]
cmd += ["--task", "audio_finetuning", "--nbest", "1", "--quiet"]
cmd += ["--w2l-decoder", "viterbi", "--criterion", "ctc"]
cmd += ["--post-process", "letter", "--max-tokens", "4000000"]
cmd += ["--path", str(w2v_ckpt.resolve()), "--labels", w2v_label]
cmd += ["--gen-subset", split, "--results-path", str(res_dir.resolve())]
print(f"running cmd:\n{' '.join(cmd)}")
subprocess.run(cmd, check=True)
def compute_error_rate(hyp_wrd_path, ref_wrd_path, unit="word"):
"""each line is "<text> (None-<index>)" """
tokenize_line = {
"word": lambda x: re.sub(r" \(.*\)$", "", x.rstrip()).split(),
"char": lambda x: list(re.sub(r" \(.*\)$", "", x.rstrip()))
}.get(unit)
if tokenize_line is None:
raise ValueError(f"{unit} not supported")
inds = [int(re.sub(r"\D*(\d*)\D*", r"\1", line))
for line in open(hyp_wrd_path)]
hyps = [tokenize_line(line) for line in open(hyp_wrd_path)]
refs = [tokenize_line(line) for line in open(ref_wrd_path)]
assert(len(hyps) == len(refs))
err_rates = [
editdistance.eval(hyp, ref) / len(ref) for hyp, ref in zip(hyps, refs)
]
ind_to_err_rates = {i: e for i, e in zip(inds, err_rates)}
return ind_to_err_rates
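# Hedged worked example with the expected "<text> (None-<index>)" line format:
#   hypo "A CAT SAT (None-0)" vs. ref "A CAT SAT ON A MAT (None-0)"
#   -> word error rate for index 0 = editdistance / len(ref) = 3 / 6 = 0.5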
def main(args):
samples = load_tsv_to_dicts(args.raw_manifest)
ids = [
sample[args.id_header] if args.id_header else "" for sample in samples
]
audio_paths = [sample[args.audio_header] for sample in samples]
texts = [sample[args.text_header] for sample in samples]
prepare_w2v_data(
args.w2v_dict_dir,
args.w2v_sample_rate,
args.w2v_label,
audio_paths,
texts,
args.split,
args.asr_dir
)
run_asr(args.asr_dir, args.split, args.w2v_ckpt, args.w2v_label, args.asr_dir)
ind_to_err_rates = compute_error_rate(
args.asr_dir / f"hypo.word-{args.w2v_ckpt.name}-{args.split}.txt",
args.asr_dir / f"ref.word-{args.w2v_ckpt.name}-{args.split}.txt",
args.err_unit,
)
uer_path = args.asr_dir / f"uer_{args.err_unit}.{args.split}.tsv"
with open(uer_path, "w") as f:
f.write("id\taudio\tuer\n")
for ind, (id_, audio_path) in enumerate(zip(ids, audio_paths)):
f.write(f"{id_}\t{audio_path}\t{ind_to_err_rates[ind]:.4f}\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--raw-manifest", required=True, type=Path)
parser.add_argument("--asr-dir", required=True, type=Path)
parser.add_argument("--id-header", default="id", type=str)
parser.add_argument("--audio-header", default="audio", type=str)
parser.add_argument("--text-header", default="src_text", type=str)
parser.add_argument("--split", default="raw", type=str)
parser.add_argument("--w2v-ckpt", required=True, type=Path)
parser.add_argument("--w2v-dict-dir", required=True, type=Path)
parser.add_argument("--w2v-sample-rate", default=16000, type=int)
parser.add_argument("--w2v-label", default="ltr", type=str)
parser.add_argument("--err-unit", default="word", type=str)
args = parser.parse_args()
main(args)
| 4,654 | 35.085271 | 82 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/evaluation/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 177 | 34.6 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/evaluation/eval_f0.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Signal processing-based evaluation using waveforms
"""
import numpy as np
import os.path as op
import torchaudio
import tqdm
from tabulate import tabulate
from examples.speech_synthesis.utils import (
gross_pitch_error, voicing_decision_error, f0_frame_error
)
from examples.speech_synthesis.evaluation.eval_sp import load_eval_spec
def difference_function(x, n, tau_max):
"""
Compute difference function of data x. This solution is implemented directly
with Numpy fft.
:param x: audio data
:param n: length of data
:param tau_max: integration window size
:return: difference function
:rtype: list
"""
x = np.array(x, np.float64)
w = x.size
tau_max = min(tau_max, w)
x_cumsum = np.concatenate((np.array([0.]), (x * x).cumsum()))
size = w + tau_max
p2 = (size // 32).bit_length()
nice_numbers = (16, 18, 20, 24, 25, 27, 30, 32)
size_pad = min(x * 2 ** p2 for x in nice_numbers if x * 2 ** p2 >= size)
fc = np.fft.rfft(x, size_pad)
conv = np.fft.irfft(fc * fc.conjugate())[:tau_max]
return x_cumsum[w:w - tau_max:-1] + x_cumsum[w] - x_cumsum[:tau_max] - \
2 * conv
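# The FFT trick above evaluates the YIN difference function
#   d(tau) = sum_j (x_j - x_{j+tau})^2,
# expanded into two signal-energy terms (taken from x_cumsum) minus twice the
# autocorrelation, which is computed with a single rFFT/irFFT pair; size_pad is
# rounded up to a cheap-to-transform FFT length.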
def cumulative_mean_normalized_difference_function(df, n):
"""
Compute cumulative mean normalized difference function (CMND).
:param df: Difference function
:param n: length of data
:return: cumulative mean normalized difference function
:rtype: list
"""
# scipy method
cmn_df = df[1:] * range(1, n) / np.cumsum(df[1:]).astype(float)
return np.insert(cmn_df, 0, 1)
def get_pitch(cmdf, tau_min, tau_max, harmo_th=0.1):
"""
Return fundamental period of a frame based on CMND function.
:param cmdf: Cumulative Mean Normalized Difference function
:param tau_min: minimum period for speech
:param tau_max: maximum period for speech
:param harmo_th: harmonicity threshold to determine if it is necessary to
compute pitch frequency
:return: fundamental period if there is values under threshold, 0 otherwise
:rtype: float
"""
tau = tau_min
while tau < tau_max:
if cmdf[tau] < harmo_th:
while tau + 1 < tau_max and cmdf[tau + 1] < cmdf[tau]:
tau += 1
return tau
tau += 1
return 0 # if unvoiced
def compute_yin(sig, sr, w_len=512, w_step=256, f0_min=100, f0_max=500,
harmo_thresh=0.1):
"""
Compute the Yin Algorithm. Return fundamental frequency and harmonic rate.
https://github.com/NVIDIA/mellotron adaption of
https://github.com/patriceguyot/Yin
:param sig: Audio signal (list of float)
:param sr: sampling rate (int)
:param w_len: size of the analysis window (samples)
:param w_step: size of the lag between two consecutives windows (samples)
:param f0_min: Minimum fundamental frequency that can be detected (hertz)
:param f0_max: Maximum fundamental frequency that can be detected (hertz)
    :param harmo_thresh: Threshold of detection. The algorithm returns the
first minimum of the CMND function below this threshold.
:returns:
* pitches: list of fundamental frequencies,
* harmonic_rates: list of harmonic rate values for each fundamental
frequency value (= confidence value)
        * argmins: minimums of the Cumulative Mean Normalized Difference Function
* times: list of time of each estimation
:rtype: tuple
"""
tau_min = int(sr / f0_max)
tau_max = int(sr / f0_min)
# time values for each analysis window
time_scale = range(0, len(sig) - w_len, w_step)
times = [t/float(sr) for t in time_scale]
frames = [sig[t:t + w_len] for t in time_scale]
pitches = [0.0] * len(time_scale)
harmonic_rates = [0.0] * len(time_scale)
argmins = [0.0] * len(time_scale)
for i, frame in enumerate(frames):
# Compute YIN
df = difference_function(frame, w_len, tau_max)
cm_df = cumulative_mean_normalized_difference_function(df, tau_max)
p = get_pitch(cm_df, tau_min, tau_max, harmo_thresh)
# Get results
if np.argmin(cm_df) > tau_min:
argmins[i] = float(sr / np.argmin(cm_df))
if p != 0: # A pitch was found
pitches[i] = float(sr / p)
harmonic_rates[i] = cm_df[p]
else: # No pitch, but we compute a value of the harmonic rate
harmonic_rates[i] = min(cm_df)
return pitches, harmonic_rates, argmins, times
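# Illustrative usage sketch (hypothetical mono signal `sig` at 22.05 kHz):
#   >>> pitches, harmonic_rates, argmins, times = compute_yin(sig, 22050)
#   >>> len(pitches) == len(times)  # one estimate per analysis window
#   True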
def extract_f0(samples):
f0_samples = []
for sample in tqdm.tqdm(samples):
if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
f0_samples.append(None)
continue
# assume single channel
yref, sr = torchaudio.load(sample["ref"])
ysyn, _sr = torchaudio.load(sample["syn"])
yref, ysyn = yref[0], ysyn[0]
assert sr == _sr, f"{sr} != {_sr}"
yref_f0 = compute_yin(yref, sr)
ysyn_f0 = compute_yin(ysyn, sr)
f0_samples += [
{
"ref": yref_f0,
"syn": ysyn_f0
}
]
return f0_samples
def eval_f0_error(samples, distortion_fn):
results = []
for sample in tqdm.tqdm(samples):
if sample is None:
results.append(None)
continue
# assume single channel
yref_f, _, _, yref_t = sample["ref"]
ysyn_f, _, _, ysyn_t = sample["syn"]
yref_f = np.array(yref_f)
yref_t = np.array(yref_t)
ysyn_f = np.array(ysyn_f)
ysyn_t = np.array(ysyn_t)
distortion = distortion_fn(yref_t, yref_f, ysyn_t, ysyn_f)
results.append((distortion.item(),
len(yref_f),
len(ysyn_f)
))
return results
def eval_gross_pitch_error(samples):
return eval_f0_error(samples, gross_pitch_error)
def eval_voicing_decision_error(samples):
return eval_f0_error(samples, voicing_decision_error)
def eval_f0_frame_error(samples):
return eval_f0_error(samples, f0_frame_error)
def print_results(results, show_bin):
results = np.array(list(filter(lambda x: x is not None, results)))
np.set_printoptions(precision=3)
def _print_result(results):
res = {
"nutt": len(results),
"error": results[:, 0].mean(),
"std": results[:, 0].std(),
"dur_ref": int(results[:, 1].sum()),
"dur_syn": int(results[:, 2].sum()),
}
print(tabulate([res.values()], res.keys(), floatfmt=".4f"))
print(">>>> ALL")
_print_result(results)
if show_bin:
edges = [0, 200, 400, 600, 800, 1000, 2000, 4000]
for i in range(1, len(edges)):
mask = np.logical_and(results[:, 1] >= edges[i-1],
results[:, 1] < edges[i])
if not mask.any():
continue
bin_results = results[mask]
print(f">>>> ({edges[i-1]}, {edges[i]})")
_print_result(bin_results)
def main(eval_f0, gpe, vde, ffe, show_bin):
samples = load_eval_spec(eval_f0)
if gpe or vde or ffe:
f0_samples = extract_f0(samples)
if gpe:
print("===== Evaluate Gross Pitch Error =====")
results = eval_gross_pitch_error(f0_samples)
print_results(results, show_bin)
if vde:
print("===== Evaluate Voicing Decision Error =====")
results = eval_voicing_decision_error(f0_samples)
print_results(results, show_bin)
if ffe:
print("===== Evaluate F0 Frame Error =====")
results = eval_f0_frame_error(f0_samples)
print_results(results, show_bin)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("eval_f0")
parser.add_argument("--gpe", action="store_true")
parser.add_argument("--vde", action="store_true")
parser.add_argument("--ffe", action="store_true")
parser.add_argument("--show-bin", action="store_true")
args = parser.parse_args()
main(args.eval_f0, args.gpe, args.vde, args.ffe, args.show_bin)
| 8,333 | 30.213483 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/get_feature_manifest.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from collections import Counter, defaultdict
import pandas as pd
import torchaudio
from tqdm import tqdm
from fairseq.data.audio.audio_utils import convert_waveform
from examples.speech_to_text.data_utils import (
create_zip,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_tsv_to_dicts,
save_df_to_tsv
)
from examples.speech_synthesis.data_utils import (
extract_logmel_spectrogram, extract_pitch, extract_energy, get_global_cmvn,
ipa_phonemize, get_mfa_alignment, get_unit_alignment,
get_feature_value_min_max
)
log = logging.getLogger(__name__)
def process(args):
assert "train" in args.splits
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
print("Fetching data...")
audio_manifest_root = Path(args.audio_manifest_root).absolute()
samples = []
for s in args.splits:
for e in load_tsv_to_dicts(audio_manifest_root / f"{s}.audio.tsv"):
e["split"] = s
samples.append(e)
sample_ids = [s["id"] for s in samples]
# Get alignment info
id_to_alignment = None
if args.textgrid_zip is not None:
assert args.id_to_units_tsv is None
id_to_alignment = get_mfa_alignment(
args.textgrid_zip, sample_ids, args.sample_rate, args.hop_length
)
elif args.id_to_units_tsv is not None:
# assume identical hop length on the unit sequence
id_to_alignment = get_unit_alignment(args.id_to_units_tsv, sample_ids)
# Extract features and pack features into ZIP
feature_name = "logmelspec80"
zip_path = out_root / f"{feature_name}.zip"
pitch_zip_path = out_root / "pitch.zip"
energy_zip_path = out_root / "energy.zip"
gcmvn_npz_path = out_root / "gcmvn_stats.npz"
if zip_path.exists() and gcmvn_npz_path.exists():
print(f"{zip_path} and {gcmvn_npz_path} exist.")
else:
feature_root = out_root / feature_name
feature_root.mkdir(exist_ok=True)
pitch_root = out_root / "pitch"
energy_root = out_root / "energy"
if args.add_fastspeech_targets:
pitch_root.mkdir(exist_ok=True)
energy_root.mkdir(exist_ok=True)
print("Extracting Mel spectrogram features...")
for sample in tqdm(samples):
waveform, sample_rate = torchaudio.load(sample["audio"])
waveform, sample_rate = convert_waveform(
waveform, sample_rate, normalize_volume=args.normalize_volume,
to_sample_rate=args.sample_rate
)
sample_id = sample["id"]
target_length = None
if id_to_alignment is not None:
a = id_to_alignment[sample_id]
target_length = sum(a.frame_durations)
if a.start_sec is not None and a.end_sec is not None:
start_frame = int(a.start_sec * sample_rate)
end_frame = int(a.end_sec * sample_rate)
waveform = waveform[:, start_frame: end_frame]
extract_logmel_spectrogram(
waveform, sample_rate, feature_root / f"{sample_id}.npy",
win_length=args.win_length, hop_length=args.hop_length,
n_fft=args.n_fft, n_mels=args.n_mels, f_min=args.f_min,
f_max=args.f_max, target_length=target_length
)
if args.add_fastspeech_targets:
assert id_to_alignment is not None
extract_pitch(
waveform, sample_rate, pitch_root / f"{sample_id}.npy",
hop_length=args.hop_length, log_scale=True,
phoneme_durations=id_to_alignment[sample_id].frame_durations
)
extract_energy(
waveform, energy_root / f"{sample_id}.npy",
hop_length=args.hop_length, n_fft=args.n_fft,
log_scale=True,
phoneme_durations=id_to_alignment[sample_id].frame_durations
)
print("ZIPing features...")
create_zip(feature_root, zip_path)
get_global_cmvn(feature_root, gcmvn_npz_path)
shutil.rmtree(feature_root)
if args.add_fastspeech_targets:
create_zip(pitch_root, pitch_zip_path)
shutil.rmtree(pitch_root)
create_zip(energy_root, energy_zip_path)
shutil.rmtree(energy_root)
print("Fetching ZIP manifest...")
audio_paths, audio_lengths = get_zip_manifest(zip_path)
pitch_paths, pitch_lengths, energy_paths, energy_lengths = [None] * 4
if args.add_fastspeech_targets:
pitch_paths, pitch_lengths = get_zip_manifest(pitch_zip_path)
energy_paths, energy_lengths = get_zip_manifest(energy_zip_path)
# Generate TSV manifest
print("Generating manifest...")
id_to_cer = None
if args.cer_threshold is not None:
assert Path(args.cer_tsv_path).is_file()
id_to_cer = {
x["id"]: x["uer"] for x in load_tsv_to_dicts(args.cer_tsv_path)
}
manifest_by_split = {split: defaultdict(list) for split in args.splits}
for sample in tqdm(samples):
sample_id, split = sample["id"], sample["split"]
        if args.snr_threshold is not None and "snr" in sample \
                and float(sample["snr"]) < args.snr_threshold:
continue
if args.cer_threshold is not None \
                and id_to_cer[sample_id] > args.cer_threshold:
continue
normalized_utt = sample["tgt_text"]
if id_to_alignment is not None:
normalized_utt = " ".join(id_to_alignment[sample_id].tokens)
elif args.ipa_vocab:
normalized_utt = ipa_phonemize(
normalized_utt, lang=args.lang, use_g2p=args.use_g2p
)
manifest_by_split[split]["id"].append(sample_id)
manifest_by_split[split]["audio"].append(audio_paths[sample_id])
manifest_by_split[split]["n_frames"].append(audio_lengths[sample_id])
manifest_by_split[split]["tgt_text"].append(normalized_utt)
manifest_by_split[split]["speaker"].append(sample["speaker"])
manifest_by_split[split]["src_text"].append(sample["src_text"])
if args.add_fastspeech_targets:
assert id_to_alignment is not None
duration = " ".join(
str(d) for d in id_to_alignment[sample_id].frame_durations
)
manifest_by_split[split]["duration"].append(duration)
manifest_by_split[split]["pitch"].append(pitch_paths[sample_id])
manifest_by_split[split]["energy"].append(energy_paths[sample_id])
for split in args.splits:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
out_root / f"{split}.tsv"
)
# Generate vocab
vocab_name, spm_filename = None, None
if id_to_alignment is not None or args.ipa_vocab:
vocab = Counter()
for t in manifest_by_split["train"]["tgt_text"]:
vocab.update(t.split(" "))
vocab_name = "vocab.txt"
with open(out_root / vocab_name, "w") as f:
for s, c in vocab.most_common():
f.write(f"{s} {c}\n")
else:
spm_filename_prefix = "spm_char"
spm_filename = f"{spm_filename_prefix}.model"
with NamedTemporaryFile(mode="w") as f:
for t in manifest_by_split["train"]["tgt_text"]:
f.write(t + "\n")
f.flush() # needed to ensure gen_vocab sees dumped text
gen_vocab(Path(f.name), out_root / spm_filename_prefix, "char")
# Generate speaker list
speakers = sorted({sample["speaker"] for sample in samples})
speakers_path = out_root / "speakers.txt"
with open(speakers_path, "w") as f:
for speaker in speakers:
f.write(f"{speaker}\n")
# Generate config YAML
win_len_t = args.win_length / args.sample_rate
hop_len_t = args.hop_length / args.sample_rate
extra = {
"sample_rate": args.sample_rate,
"features": {
"type": "spectrogram+melscale+log",
"eps": 1e-5, "n_mels": args.n_mels, "n_fft": args.n_fft,
"window_fn": "hann", "win_length": args.win_length,
"hop_length": args.hop_length, "sample_rate": args.sample_rate,
"win_len_t": win_len_t, "hop_len_t": hop_len_t,
"f_min": args.f_min, "f_max": args.f_max,
"n_stft": args.n_fft // 2 + 1
}
}
if len(speakers) > 1:
extra["speaker_set_filename"] = "speakers.txt"
if args.add_fastspeech_targets:
pitch_min, pitch_max = get_feature_value_min_max(
[(out_root / n).as_posix() for n in pitch_paths.values()]
)
energy_min, energy_max = get_feature_value_min_max(
[(out_root / n).as_posix() for n in energy_paths.values()]
)
extra["features"]["pitch_min"] = pitch_min
extra["features"]["pitch_max"] = pitch_max
extra["features"]["energy_min"] = energy_min
extra["features"]["energy_max"] = energy_max
gen_config_yaml(
out_root, spm_filename=spm_filename, vocab_name=vocab_name,
audio_root=out_root.as_posix(), input_channels=None,
input_feat_per_channel=None, specaugment_policy=None,
cmvn_type="global", gcmvn_path=gcmvn_npz_path, extra=extra
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-manifest-root", "-m", required=True, type=str)
parser.add_argument("--output-root", "-o", required=True, type=str)
parser.add_argument("--splits", "-s", type=str, nargs="+",
default=["train", "dev", "test"])
parser.add_argument("--ipa-vocab", action="store_true")
parser.add_argument("--use-g2p", action="store_true")
parser.add_argument("--lang", type=str, default="en-us")
parser.add_argument("--win-length", type=int, default=1024)
parser.add_argument("--hop-length", type=int, default=256)
parser.add_argument("--n-fft", type=int, default=1024)
parser.add_argument("--n-mels", type=int, default=80)
parser.add_argument("--f-min", type=int, default=20)
parser.add_argument("--f-max", type=int, default=8000)
parser.add_argument("--sample-rate", type=int, default=22050)
parser.add_argument("--normalize-volume", "-n", action="store_true")
parser.add_argument("--textgrid-zip", type=str, default=None)
parser.add_argument("--id-to-units-tsv", type=str, default=None)
parser.add_argument("--add-fastspeech-targets", action="store_true")
parser.add_argument("--snr-threshold", type=float, default=None)
parser.add_argument("--cer-threshold", type=float, default=None)
parser.add_argument("--cer-tsv-path", type=str, default="")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 11,171 | 41.479087 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/get_common_voice_audio_manifest.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
from collections import defaultdict
from typing import List, Dict, Tuple
import pandas as pd
import numpy as np
import torchaudio
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_df_from_tsv, save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def get_top_n(
root: Path, n_speakers: int = 10, min_n_tokens: int = 5
) -> pd.DataFrame:
df = load_df_from_tsv(root / "validated.tsv")
df["n_tokens"] = [len(s.split()) for s in df["sentence"]]
df = df[df["n_tokens"] >= min_n_tokens]
df["n_frames"] = [
torchaudio.info((root / "clips" / p).as_posix()).num_frames
for p in tqdm(df["path"])
]
df["id"] = [Path(p).stem for p in df["path"]]
total_duration_ms = df.groupby("client_id")["n_frames"].agg(["sum"])
total_duration_ms = total_duration_ms.sort_values("sum", ascending=False)
top_n_total_duration_ms = total_duration_ms.head(n_speakers)
top_n_client_ids = set(top_n_total_duration_ms.index.tolist())
df_top_n = df[df["client_id"].isin(top_n_client_ids)]
return df_top_n
def get_splits(
df, train_split_ratio=0.99, speaker_in_all_splits=False, rand_seed=0
) -> Tuple[Dict[str, str], List[str]]:
np.random.seed(rand_seed)
dev_split_ratio = (1. - train_split_ratio) / 3
grouped = list(df.groupby("client_id"))
id_to_split = {}
for _, cur_df in tqdm(grouped):
cur_n_examples = len(cur_df)
if speaker_in_all_splits and cur_n_examples < 3:
continue
cur_n_train = int(cur_n_examples * train_split_ratio)
cur_n_dev = int(cur_n_examples * dev_split_ratio)
cur_n_test = cur_n_examples - cur_n_dev - cur_n_train
if speaker_in_all_splits and cur_n_dev * cur_n_test == 0:
cur_n_dev, cur_n_test = 1, 1
cur_n_train = cur_n_examples - cur_n_dev - cur_n_test
cur_indices = cur_df.index.tolist()
cur_shuffled_indices = np.random.permutation(cur_n_examples)
cur_shuffled_indices = [cur_indices[i] for i in cur_shuffled_indices]
cur_indices_by_split = {
"train": cur_shuffled_indices[:cur_n_train],
"dev": cur_shuffled_indices[cur_n_train: cur_n_train + cur_n_dev],
"test": cur_shuffled_indices[cur_n_train + cur_n_dev:]
}
for split in SPLITS:
for i in cur_indices_by_split[split]:
id_ = df["id"].loc[i]
id_to_split[id_] = split
return id_to_split, sorted(df["client_id"].unique())
def convert_to_wav(root: Path, filenames: List[str], target_sr=16_000):
out_root = root / "wav"
out_root.mkdir(exist_ok=True, parents=True)
print("Converting to WAV...")
for n in tqdm(filenames):
in_path = (root / "clips" / n).as_posix()
waveform, sr = torchaudio.load(in_path)
converted, converted_sr = torchaudio.sox_effects.apply_effects_tensor(
waveform, sr, [["rate", str(target_sr)], ["channels", "1"]]
)
out_path = (out_root / Path(n).with_suffix(".wav").name).as_posix()
torchaudio.save(out_path, converted, converted_sr, encoding="PCM_S",
bits_per_sample=16)
def process(args):
data_root = Path(args.data_root).absolute() / args.lang
# Generate TSV manifest
print("Generating manifest...")
df_top_n = get_top_n(data_root)
id_to_split, speakers = get_splits(df_top_n)
if args.convert_to_wav:
convert_to_wav(data_root, df_top_n["path"].tolist())
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
for sample in tqdm(df_top_n.to_dict(orient="index").values()):
sample_id = sample["id"]
split = id_to_split[sample_id]
manifest_by_split[split]["id"].append(sample_id)
if args.convert_to_wav:
audio_path = data_root / "wav" / f"{sample_id}.wav"
else:
audio_path = data_root / "clips" / f"{sample_id}.mp3"
manifest_by_split[split]["audio"].append(audio_path.as_posix())
manifest_by_split[split]["n_frames"].append(sample["n_frames"])
manifest_by_split[split]["tgt_text"].append(sample["sentence"])
manifest_by_split[split]["speaker"].append(sample["client_id"])
manifest_by_split[split]["src_text"].append(sample["sentence"])
output_root = Path(args.output_manifest_root).absolute()
output_root.mkdir(parents=True, exist_ok=True)
for split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
output_root / f"{split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
parser.add_argument("--lang", "-l", required=True, type=str)
parser.add_argument("--convert-to-wav", action="store_true")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 5,277 | 36.432624 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/denoise_and_vad_audio.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import csv
import tempfile
from collections import defaultdict
from pathlib import Path
import torchaudio
try:
import webrtcvad
except ImportError:
raise ImportError("Please install py-webrtcvad: pip install webrtcvad")
import pandas as pd
from tqdm import tqdm
from examples.speech_synthesis.preprocessing.denoiser.pretrained import master64
import examples.speech_synthesis.preprocessing.denoiser.utils as utils
from examples.speech_synthesis.preprocessing.vad import (
frame_generator, vad_collector, read_wave, write_wave, FS_MS, THRESHOLD,
SCALE
)
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
PATHS = ["after_denoise", "after_vad"]
MIN_T = 0.05
def generate_tmp_filename(extension="txt"):
return tempfile._get_default_tempdir() + "/" + \
next(tempfile._get_candidate_names()) + "." + extension
def convert_sr(inpath, sr, output_path=None):
if not output_path:
output_path = generate_tmp_filename("wav")
cmd = f"sox {inpath} -r {sr} {output_path}"
os.system(cmd)
return output_path
def apply_vad(vad, inpath):
audio, sample_rate = read_wave(inpath)
frames = frame_generator(FS_MS, audio, sample_rate)
frames = list(frames)
segments = vad_collector(sample_rate, FS_MS, 300, vad, frames)
merge_segments = list()
timestamp_start = 0.0
timestamp_end = 0.0
    # remove leading/trailing silence and cap long silence runs between segments
for i, segment in enumerate(segments):
merge_segments.append(segment[0])
if i and timestamp_start:
sil_duration = segment[1] - timestamp_end
if sil_duration > THRESHOLD:
merge_segments.append(int(THRESHOLD / SCALE) * (b'\x00'))
else:
merge_segments.append(int((sil_duration / SCALE)) * (b'\x00'))
timestamp_start = segment[1]
timestamp_end = segment[2]
segment = b''.join(merge_segments)
return segment, sample_rate
def write(wav, filename, sr=16_000):
    # normalize audio only when needed to prevent clipping
wav = wav / max(wav.abs().max().item(), 1)
torchaudio.save(filename, wav.cpu(), sr, encoding="PCM_S",
bits_per_sample=16)
def process(args):
# making sure we are requested either denoise or vad
if not args.denoise and not args.vad:
log.error("No denoise or vad is requested.")
return
log.info("Creating out directories...")
if args.denoise:
out_denoise = Path(args.output_dir).absolute().joinpath(PATHS[0])
out_denoise.mkdir(parents=True, exist_ok=True)
if args.vad:
out_vad = Path(args.output_dir).absolute().joinpath(PATHS[1])
out_vad.mkdir(parents=True, exist_ok=True)
log.info("Loading pre-trained speech enhancement model...")
model = master64().to(args.device)
log.info("Building the VAD model...")
vad = webrtcvad.Vad(int(args.vad_agg_level))
# preparing the output dict
output_dict = defaultdict(list)
log.info(f"Parsing input manifest: {args.audio_manifest}")
with open(args.audio_manifest, "r") as f:
manifest_dict = csv.DictReader(f, delimiter="\t")
for row in tqdm(manifest_dict):
filename = str(row["audio"])
final_output = filename
keep_sample = True
n_frames = row["n_frames"]
snr = -1
if args.denoise:
output_path_denoise = out_denoise.joinpath(Path(filename).name)
                # convert to 16 kHz in case a different sample rate is used
tmp_path = convert_sr(final_output, 16000)
# loading audio file and generating the enhanced version
out, sr = torchaudio.load(tmp_path)
out = out.to(args.device)
estimate = model(out)
estimate = (1 - args.dry_wet) * estimate + args.dry_wet * out
write(estimate[0], str(output_path_denoise), sr)
snr = utils.cal_snr(out, estimate)
snr = snr.cpu().detach().numpy()[0][0]
final_output = str(output_path_denoise)
if args.vad:
output_path_vad = out_vad.joinpath(Path(filename).name)
sr = torchaudio.info(final_output).sample_rate
if sr in [16000, 32000, 48000]:
tmp_path = final_output
elif sr < 16000:
tmp_path = convert_sr(final_output, 16000)
elif sr < 32000:
tmp_path = convert_sr(final_output, 32000)
else:
tmp_path = convert_sr(final_output, 48000)
# apply VAD
segment, sample_rate = apply_vad(vad, tmp_path)
if len(segment) < sample_rate * MIN_T:
keep_sample = False
print((
f"WARNING: skip {filename} because it is too short "
f"after VAD ({len(segment) / sample_rate} < {MIN_T})"
))
else:
if sample_rate != sr:
tmp_path = generate_tmp_filename("wav")
write_wave(tmp_path, segment, sample_rate)
convert_sr(tmp_path, sr,
output_path=str(output_path_vad))
else:
write_wave(str(output_path_vad), segment, sample_rate)
final_output = str(output_path_vad)
segment, _ = torchaudio.load(final_output)
n_frames = segment.size(1)
if keep_sample:
output_dict["id"].append(row["id"])
output_dict["audio"].append(final_output)
output_dict["n_frames"].append(n_frames)
output_dict["tgt_text"].append(row["tgt_text"])
output_dict["speaker"].append(row["speaker"])
output_dict["src_text"].append(row["src_text"])
output_dict["snr"].append(snr)
out_tsv_path = Path(args.output_dir) / Path(args.audio_manifest).name
log.info(f"Saving manifest to {out_tsv_path.as_posix()}")
save_df_to_tsv(pd.DataFrame.from_dict(output_dict), out_tsv_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-manifest", "-i", required=True,
type=str, help="path to the input manifest.")
parser.add_argument(
"--output-dir", "-o", required=True, type=str,
help="path to the output dir. it will contain files after denoising and"
" vad"
)
parser.add_argument("--vad-agg-level", "-a", type=int, default=2,
help="the aggresive level of the vad [0-3].")
parser.add_argument(
"--dry-wet", "-dw", type=float, default=0.01,
help="the level of linear interpolation between noisy and enhanced "
"files."
)
parser.add_argument(
"--device", "-d", type=str, default="cpu",
help="the device to be used for the speech enhancement model: "
"cpu | cuda."
)
parser.add_argument("--denoise", action="store_true",
help="apply a denoising")
parser.add_argument("--vad", action="store_true", help="apply a VAD")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 7,655 | 36.346341 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/get_ljspeech_audio_manifest.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
from collections import defaultdict
import pandas as pd
from torchaudio.datasets import LJSPEECH
from tqdm import tqdm
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def process(args):
out_root = Path(args.output_data_root).absolute()
out_root.mkdir(parents=True, exist_ok=True)
# Generate TSV manifest
print("Generating manifest...")
# following FastSpeech's splits
dataset = LJSPEECH(out_root.as_posix(), download=True)
id_to_split = {}
for x in dataset._flist:
id_ = x[0]
speaker = id_.split("-")[0]
id_to_split[id_] = {
"LJ001": "test", "LJ002": "test", "LJ003": "dev"
}.get(speaker, "train")
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
progress = tqdm(enumerate(dataset), total=len(dataset))
for i, (waveform, _, utt, normalized_utt) in progress:
sample_id = dataset._flist[i][0]
split = id_to_split[sample_id]
manifest_by_split[split]["id"].append(sample_id)
audio_path = f"{dataset._path}/{sample_id}.wav"
manifest_by_split[split]["audio"].append(audio_path)
manifest_by_split[split]["n_frames"].append(len(waveform[0]))
manifest_by_split[split]["tgt_text"].append(normalized_utt)
manifest_by_split[split]["speaker"].append("ljspeech")
manifest_by_split[split]["src_text"].append(utt)
manifest_root = Path(args.output_manifest_root).absolute()
manifest_root.mkdir(parents=True, exist_ok=True)
for split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[split]),
manifest_root / f"{split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 2,288 | 31.239437 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 177 | 34.6 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/get_vctk_audio_manifest.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import numpy as np
import re
from pathlib import Path
from collections import defaultdict
import pandas as pd
from torchaudio.datasets import VCTK
from tqdm import tqdm
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
SPLITS = ["train", "dev", "test"]
def normalize_text(text):
return re.sub(r"[^a-zA-Z.?!,'\- ]", '', text)
def process(args):
out_root = Path(args.output_data_root).absolute()
out_root.mkdir(parents=True, exist_ok=True)
# Generate TSV manifest
print("Generating manifest...")
dataset = VCTK(out_root.as_posix(), download=False)
ids = list(dataset._walker)
np.random.seed(args.seed)
np.random.shuffle(ids)
n_train = len(ids) - args.n_dev - args.n_test
_split = ["train"] * n_train + ["dev"] * args.n_dev + ["test"] * args.n_test
id_to_split = dict(zip(ids, _split))
manifest_by_split = {split: defaultdict(list) for split in SPLITS}
progress = tqdm(enumerate(dataset), total=len(dataset))
for i, (waveform, _, text, speaker_id, _) in progress:
sample_id = dataset._walker[i]
_split = id_to_split[sample_id]
audio_dir = Path(dataset._path) / dataset._folder_audio / speaker_id
audio_path = audio_dir / f"{sample_id}.wav"
text = normalize_text(text)
manifest_by_split[_split]["id"].append(sample_id)
manifest_by_split[_split]["audio"].append(audio_path.as_posix())
manifest_by_split[_split]["n_frames"].append(len(waveform[0]))
manifest_by_split[_split]["tgt_text"].append(text)
manifest_by_split[_split]["speaker"].append(speaker_id)
manifest_by_split[_split]["src_text"].append(text)
manifest_root = Path(args.output_manifest_root).absolute()
manifest_root.mkdir(parents=True, exist_ok=True)
for _split in SPLITS:
save_df_to_tsv(
pd.DataFrame.from_dict(manifest_by_split[_split]),
manifest_root / f"{_split}.audio.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-data-root", "-d", required=True, type=str)
parser.add_argument("--output-manifest-root", "-m", required=True, type=str)
parser.add_argument("--n-dev", default=50, type=int)
parser.add_argument("--n-test", default=100, type=int)
parser.add_argument("--seed", "-s", default=1234, type=int)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 2,685 | 32.575 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/get_speaker_embedding.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import defaultdict
from itertools import chain
from pathlib import Path
import numpy as np
import torchaudio
import torchaudio.sox_effects as ta_sox
import yaml
from tqdm import tqdm
from examples.speech_to_text.data_utils import load_tsv_to_dicts
from examples.speech_synthesis.preprocessing.speaker_embedder import SpkrEmbedder
def extract_embedding(audio_path, embedder):
wav, sr = torchaudio.load(audio_path) # 2D
if sr != embedder.RATE:
wav, sr = ta_sox.apply_effects_tensor(
wav, sr, [["rate", str(embedder.RATE)]]
)
try:
emb = embedder([wav[0].cuda().float()]).cpu().numpy()
except RuntimeError:
emb = None
return emb
def process(args):
print("Fetching data...")
raw_manifest_root = Path(args.raw_manifest_root).absolute()
samples = [load_tsv_to_dicts(raw_manifest_root / (s + ".tsv"))
for s in args.splits]
samples = list(chain(*samples))
with open(args.config, "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
with open(f"{config['audio_root']}/{config['speaker_set_filename']}") as f:
speaker_to_id = {r.strip(): i for i, r in enumerate(f)}
embedder = SpkrEmbedder(args.ckpt).cuda()
speaker_to_cnt = defaultdict(float)
speaker_to_emb = defaultdict(float)
for sample in tqdm(samples, desc="extract emb"):
emb = extract_embedding(sample["audio"], embedder)
if emb is not None:
speaker_to_cnt[sample["speaker"]] += 1
speaker_to_emb[sample["speaker"]] += emb
if len(speaker_to_emb) != len(speaker_to_id):
missed = set(speaker_to_id) - set(speaker_to_emb.keys())
print(
f"WARNING: missing embeddings for {len(missed)} speaker:\n{missed}"
)
speaker_emb_mat = np.zeros((len(speaker_to_id), len(emb)), float)
for speaker in speaker_to_emb:
idx = speaker_to_id[speaker]
emb = speaker_to_emb[speaker]
cnt = speaker_to_cnt[speaker]
speaker_emb_mat[idx, :] = emb / cnt
speaker_emb_name = "speaker_emb.npy"
speaker_emb_path = f"{config['audio_root']}/{speaker_emb_name}"
np.save(speaker_emb_path, speaker_emb_mat)
config["speaker_emb_filename"] = speaker_emb_name
with open(args.new_config, "w") as f:
yaml.dump(config, f)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--raw-manifest-root", "-m", required=True, type=str)
parser.add_argument("--splits", "-s", type=str, nargs="+",
default=["train"])
parser.add_argument("--config", "-c", required=True, type=str)
parser.add_argument("--new-config", "-n", required=True, type=str)
parser.add_argument("--ckpt", required=True, type=str,
help="speaker embedder checkpoint")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 3,116 | 33.633333 | 81 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/resample.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import math
import torch as th
from torch.nn import functional as F
def sinc(t):
"""sinc.
:param t: the input tensor
"""
return th.where(t == 0, th.tensor(1., device=t.device, dtype=t.dtype),
th.sin(t) / t)
def kernel_upsample2(zeros=56):
"""kernel_upsample2.
"""
win = th.hann_window(4 * zeros + 1, periodic=False)
winodd = win[1::2]
t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
t *= math.pi
kernel = (sinc(t) * winodd).view(1, 1, -1)
return kernel
def upsample2(x, zeros=56):
"""
Upsampling the input by 2 using sinc interpolation.
Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
Vol. 9. IEEE, 1984.
"""
*other, time = x.shape
kernel = kernel_upsample2(zeros).to(x)
out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(
*other, time
)
y = th.stack([x, out], dim=-1)
return y.view(*other, -1)
def kernel_downsample2(zeros=56):
"""kernel_downsample2.
"""
win = th.hann_window(4 * zeros + 1, periodic=False)
winodd = win[1::2]
t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
t.mul_(math.pi)
kernel = (sinc(t) * winodd).view(1, 1, -1)
return kernel
def downsample2(x, zeros=56):
"""
Downsampling the input by 2 using sinc interpolation.
Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
Vol. 9. IEEE, 1984.
"""
if x.shape[-1] % 2 != 0:
x = F.pad(x, (0, 1))
xeven = x[..., ::2]
xodd = x[..., 1::2]
*other, time = xodd.shape
kernel = kernel_downsample2(zeros).to(x)
out = xeven + F.conv1d(
xodd.view(-1, 1, time), kernel, padding=zeros
)[..., :-1].view(*other, time)
return out.view(*other, -1).mul(0.5)
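# Hedged sanity-check sketch: upsample2 followed by downsample2 approximately
# reconstructs the input (up to band-limiting error) and preserves its shape:
#   >>> x = th.randn(1, 1, 4096)
#   >>> downsample2(upsample2(x)).shape == x.shape
#   True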
| 2,226 | 26.8375 | 89 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/demucs.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import math
import time
import torch as th
from torch import nn
from torch.nn import functional as F
from .resample import downsample2, upsample2
from .utils import capture_init
class BLSTM(nn.Module):
def __init__(self, dim, layers=2, bi=True):
super().__init__()
klass = nn.LSTM
self.lstm = klass(
bidirectional=bi, num_layers=layers, hidden_size=dim, input_size=dim
)
self.linear = None
if bi:
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x, hidden=None):
x, hidden = self.lstm(x, hidden)
if self.linear:
x = self.linear(x)
return x, hidden
def rescale_conv(conv, reference):
std = conv.weight.std().detach()
scale = (std / reference)**0.5
conv.weight.data /= scale
if conv.bias is not None:
conv.bias.data /= scale
def rescale_module(module, reference):
for sub in module.modules():
if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)):
rescale_conv(sub, reference)
class Demucs(nn.Module):
"""
Demucs speech enhancement model.
Args:
- chin (int): number of input channels.
- chout (int): number of output channels.
- hidden (int): number of initial hidden channels.
- depth (int): number of layers.
- kernel_size (int): kernel size for each layer.
- stride (int): stride for each layer.
- causal (bool): if false, uses BiLSTM instead of LSTM.
- resample (int): amount of resampling to apply to the input/output.
Can be one of 1, 2 or 4.
- growth (float): number of channels is multiplied by this for every layer.
- max_hidden (int): maximum number of channels. Can be useful to
control the size/speed of the model.
- normalize (bool): if true, normalize the input.
- glu (bool): if true uses GLU instead of ReLU in 1x1 convolutions.
- rescale (float): controls custom weight initialization.
See https://arxiv.org/abs/1911.13254.
- floor (float): stability flooring when normalizing.
"""
@capture_init
def __init__(self,
chin=1,
chout=1,
hidden=48,
depth=5,
kernel_size=8,
stride=4,
causal=True,
resample=4,
growth=2,
max_hidden=10_000,
normalize=True,
glu=True,
rescale=0.1,
floor=1e-3):
super().__init__()
if resample not in [1, 2, 4]:
raise ValueError("Resample should be 1, 2 or 4.")
self.chin = chin
self.chout = chout
self.hidden = hidden
self.depth = depth
self.kernel_size = kernel_size
self.stride = stride
self.causal = causal
self.floor = floor
self.resample = resample
self.normalize = normalize
self.encoder = nn.ModuleList()
self.decoder = nn.ModuleList()
activation = nn.GLU(1) if glu else nn.ReLU()
ch_scale = 2 if glu else 1
for index in range(depth):
encode = []
encode += [
nn.Conv1d(chin, hidden, kernel_size, stride),
nn.ReLU(),
nn.Conv1d(hidden, hidden * ch_scale, 1), activation,
]
self.encoder.append(nn.Sequential(*encode))
decode = []
decode += [
nn.Conv1d(hidden, ch_scale * hidden, 1), activation,
nn.ConvTranspose1d(hidden, chout, kernel_size, stride),
]
if index > 0:
decode.append(nn.ReLU())
self.decoder.insert(0, nn.Sequential(*decode))
chout = hidden
chin = hidden
hidden = min(int(growth * hidden), max_hidden)
self.lstm = BLSTM(chin, bi=not causal)
if rescale:
rescale_module(self, reference=rescale)
def valid_length(self, length):
"""
        Return the nearest valid length to use with the model so that
        there are no time steps left over in the convolutions, e.g. for all
        layers, (size of the input - kernel_size) % stride = 0.
If the mixture has a valid length, the estimated sources
will have exactly the same length.
"""
length = math.ceil(length * self.resample)
for _ in range(self.depth):
length = math.ceil((length - self.kernel_size) / self.stride) + 1
length = max(length, 1)
for _ in range(self.depth):
length = (length - 1) * self.stride + self.kernel_size
length = int(math.ceil(length / self.resample))
return int(length)
@property
def total_stride(self):
return self.stride ** self.depth // self.resample
def forward(self, mix):
if mix.dim() == 2:
mix = mix.unsqueeze(1)
if self.normalize:
mono = mix.mean(dim=1, keepdim=True)
std = mono.std(dim=-1, keepdim=True)
mix = mix / (self.floor + std)
else:
std = 1
length = mix.shape[-1]
x = mix
x = F.pad(x, (0, self.valid_length(length) - length))
if self.resample == 2:
x = upsample2(x)
elif self.resample == 4:
x = upsample2(x)
x = upsample2(x)
skips = []
for encode in self.encoder:
x = encode(x)
skips.append(x)
x = x.permute(2, 0, 1)
x, _ = self.lstm(x)
x = x.permute(1, 2, 0)
for decode in self.decoder:
skip = skips.pop(-1)
x = x + skip[..., :x.shape[-1]]
x = decode(x)
if self.resample == 2:
x = downsample2(x)
elif self.resample == 4:
x = downsample2(x)
x = downsample2(x)
x = x[..., :length]
return std * x
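# Illustrative sketch (added, not part of the upstream file): minimal offline
# usage of the Demucs enhancer defined above. The one-second random "noisy"
# input at 16 kHz is an arbitrary placeholder.
def _example_offline_enhancement():
    model = Demucs(depth=5, hidden=48)
    model.eval()
    noisy = th.randn(1, 1, 16_000)   # (batch, channels, time)
    with th.no_grad():
        clean = model(noisy)         # padded internally, trimmed back to input length
    assert clean.shape == noisy.shape
    return clean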
def fast_conv(conv, x):
"""
    Faster convolution evaluation if either the kernel size is 1
    or the input length equals the kernel size (single output frame).
"""
batch, chin, length = x.shape
chout, chin, kernel = conv.weight.shape
assert batch == 1
if kernel == 1:
x = x.view(chin, length)
out = th.addmm(conv.bias.view(-1, 1),
conv.weight.view(chout, chin), x)
elif length == kernel:
x = x.view(chin * kernel, 1)
out = th.addmm(conv.bias.view(-1, 1),
conv.weight.view(chout, chin * kernel), x)
else:
out = conv(x)
return out.view(batch, chout, -1)
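# Illustrative sketch (added): `fast_conv` is meant to reproduce a regular
# Conv1d call for the special cases it handles; this is a quick equivalence
# check with an arbitrary 1x1 convolution.
def _example_fast_conv_equivalence():
    conv = nn.Conv1d(4, 8, kernel_size=1)
    x = th.randn(1, 4, 10)
    assert th.allclose(conv(x), fast_conv(conv, x), atol=1e-6)
    return fast_conv(conv, x).shape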
class DemucsStreamer:
"""
Streaming implementation for Demucs. It supports being fed with any amount
of audio at a time. You will get back as much audio as possible at that
point.
Args:
- demucs (Demucs): Demucs model.
- dry (float): amount of dry (e.g. input) signal to keep. 0 is maximum
noise removal, 1 just returns the input signal. Small values > 0
            allow limiting distortions.
- num_frames (int): number of frames to process at once. Higher values
will increase overall latency but improve the real time factor.
- resample_lookahead (int): extra lookahead used for the resampling.
- resample_buffer (int): size of the buffer of previous inputs/outputs
kept for resampling.
"""
def __init__(self, demucs,
dry=0,
num_frames=1,
resample_lookahead=64,
resample_buffer=256):
device = next(iter(demucs.parameters())).device
self.demucs = demucs
self.lstm_state = None
self.conv_state = None
self.dry = dry
self.resample_lookahead = resample_lookahead
resample_buffer = min(demucs.total_stride, resample_buffer)
self.resample_buffer = resample_buffer
self.frame_length = demucs.valid_length(1) + \
demucs.total_stride * (num_frames - 1)
self.total_length = self.frame_length + self.resample_lookahead
self.stride = demucs.total_stride * num_frames
self.resample_in = th.zeros(demucs.chin, resample_buffer, device=device)
self.resample_out = th.zeros(
demucs.chin, resample_buffer, device=device
)
self.frames = 0
self.total_time = 0
self.variance = 0
self.pending = th.zeros(demucs.chin, 0, device=device)
bias = demucs.decoder[0][2].bias
weight = demucs.decoder[0][2].weight
chin, chout, kernel = weight.shape
self._bias = bias.view(-1, 1).repeat(1, kernel).view(-1, 1)
self._weight = weight.permute(1, 2, 0).contiguous()
def reset_time_per_frame(self):
self.total_time = 0
self.frames = 0
@property
def time_per_frame(self):
return self.total_time / self.frames
def flush(self):
"""
Flush remaining audio by padding it with zero. Call this
when you have no more input and want to get back the last chunk of audio.
"""
pending_length = self.pending.shape[1]
padding = th.zeros(
self.demucs.chin, self.total_length, device=self.pending.device
)
out = self.feed(padding)
return out[:, :pending_length]
def feed(self, wav):
"""
        Apply the model to the mix using true real-time evaluation.
        Normalization is done online, as is the resampling.
"""
begin = time.time()
demucs = self.demucs
resample_buffer = self.resample_buffer
stride = self.stride
resample = demucs.resample
if wav.dim() != 2:
raise ValueError("input wav should be two dimensional.")
chin, _ = wav.shape
if chin != demucs.chin:
raise ValueError(f"Expected {demucs.chin} channels, got {chin}")
self.pending = th.cat([self.pending, wav], dim=1)
outs = []
while self.pending.shape[1] >= self.total_length:
self.frames += 1
frame = self.pending[:, :self.total_length]
dry_signal = frame[:, :stride]
if demucs.normalize:
mono = frame.mean(0)
variance = (mono**2).mean()
self.variance = variance / self.frames + \
(1 - 1 / self.frames) * self.variance
frame = frame / (demucs.floor + math.sqrt(self.variance))
frame = th.cat([self.resample_in, frame], dim=-1)
self.resample_in[:] = frame[:, stride - resample_buffer:stride]
if resample == 4:
frame = upsample2(upsample2(frame))
elif resample == 2:
frame = upsample2(frame)
# remove pre sampling buffer
frame = frame[:, resample * resample_buffer:]
# remove extra samples after window
frame = frame[:, :resample * self.frame_length]
out, extra = self._separate_frame(frame)
padded_out = th.cat([self.resample_out, out, extra], 1)
self.resample_out[:] = out[:, -resample_buffer:]
if resample == 4:
out = downsample2(downsample2(padded_out))
elif resample == 2:
out = downsample2(padded_out)
else:
out = padded_out
out = out[:, resample_buffer // resample:]
out = out[:, :stride]
if demucs.normalize:
out *= math.sqrt(self.variance)
out = self.dry * dry_signal + (1 - self.dry) * out
outs.append(out)
self.pending = self.pending[:, stride:]
self.total_time += time.time() - begin
if outs:
out = th.cat(outs, 1)
else:
out = th.zeros(chin, 0, device=wav.device)
return out
def _separate_frame(self, frame):
demucs = self.demucs
skips = []
next_state = []
first = self.conv_state is None
stride = self.stride * demucs.resample
x = frame[None]
for idx, encode in enumerate(demucs.encoder):
stride //= demucs.stride
length = x.shape[2]
if idx == demucs.depth - 1:
                # This is slightly faster for the last conv
x = fast_conv(encode[0], x)
x = encode[1](x)
x = fast_conv(encode[2], x)
x = encode[3](x)
else:
if not first:
prev = self.conv_state.pop(0)
prev = prev[..., stride:]
tgt = (length - demucs.kernel_size) // demucs.stride + 1
missing = tgt - prev.shape[-1]
offset = length - demucs.kernel_size - \
demucs.stride * (missing - 1)
x = x[..., offset:]
x = encode[1](encode[0](x))
x = fast_conv(encode[2], x)
x = encode[3](x)
if not first:
x = th.cat([prev, x], -1)
next_state.append(x)
skips.append(x)
x = x.permute(2, 0, 1)
x, self.lstm_state = demucs.lstm(x, self.lstm_state)
x = x.permute(1, 2, 0)
        # In the following, x contains only correct samples, i.e. the ones
        # for which each time position is covered by two windows of the upper
# layer. extra contains extra samples to the right, and is used only as
# a better padding for the online resampling.
extra = None
for idx, decode in enumerate(demucs.decoder):
skip = skips.pop(-1)
x += skip[..., :x.shape[-1]]
x = fast_conv(decode[0], x)
x = decode[1](x)
if extra is not None:
skip = skip[..., x.shape[-1]:]
extra += skip[..., :extra.shape[-1]]
extra = decode[2](decode[1](decode[0](extra)))
x = decode[2](x)
next_state.append(
x[..., -demucs.stride:] - decode[2].bias.view(-1, 1)
)
if extra is None:
extra = x[..., -demucs.stride:]
else:
extra[..., :demucs.stride] += next_state[-1]
x = x[..., :-demucs.stride]
if not first:
prev = self.conv_state.pop(0)
x[..., :demucs.stride] += prev
if idx != demucs.depth - 1:
x = decode[3](x)
extra = decode[3](extra)
self.conv_state = next_state
return x[0], extra[0]
def test():
import argparse
parser = argparse.ArgumentParser(
"denoiser.demucs",
description="Benchmark the streaming Demucs implementation, as well as "
"checking the delta with the offline implementation.")
parser.add_argument("--depth", default=5, type=int)
parser.add_argument("--resample", default=4, type=int)
parser.add_argument("--hidden", default=48, type=int)
parser.add_argument("--sample_rate", default=16000, type=float)
parser.add_argument("--device", default="cpu")
parser.add_argument("-t", "--num_threads", type=int)
parser.add_argument("-f", "--num_frames", type=int, default=1)
args = parser.parse_args()
if args.num_threads:
th.set_num_threads(args.num_threads)
sr = args.sample_rate
sr_ms = sr / 1000
demucs = Demucs(
depth=args.depth, hidden=args.hidden, resample=args.resample
).to(args.device)
x = th.randn(1, int(sr * 4)).to(args.device)
out = demucs(x[None])[0]
streamer = DemucsStreamer(demucs, num_frames=args.num_frames)
out_rt = []
frame_size = streamer.total_length
with th.no_grad():
while x.shape[1] > 0:
out_rt.append(streamer.feed(x[:, :frame_size]))
x = x[:, frame_size:]
frame_size = streamer.demucs.total_stride
out_rt.append(streamer.flush())
out_rt = th.cat(out_rt, 1)
model_size = sum(p.numel() for p in demucs.parameters()) * 4 / 2**20
initial_lag = streamer.total_length / sr_ms
tpf = 1000 * streamer.time_per_frame
print(f"model size: {model_size:.1f}MB, ", end='')
print(f"delta batch/streaming: {th.norm(out - out_rt) / th.norm(out):.2%}")
print(f"initial lag: {initial_lag:.1f}ms, ", end='')
print(f"stride: {streamer.stride * args.num_frames / sr_ms:.1f}ms")
print(f"time per frame: {tpf:.1f}ms, ", end='')
rtf = (1000 * streamer.time_per_frame) / (streamer.stride / sr_ms)
print(f"RTF: {rtf:.2f}")
print(f"Total lag with computation: {initial_lag + tpf:.1f}ms")
if __name__ == "__main__":
test()
| 16,989 | 34.843882 | 83 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import functools
import logging
from contextlib import contextmanager
import inspect
import time
logger = logging.getLogger(__name__)
EPS = 1e-8
def capture_init(init):
"""capture_init.
Decorate `__init__` with this, and you can then
recover the *args and **kwargs passed to it in `self._init_args_kwargs`
"""
@functools.wraps(init)
def __init__(self, *args, **kwargs):
self._init_args_kwargs = (args, kwargs)
init(self, *args, **kwargs)
return __init__
def deserialize_model(package, strict=False):
"""deserialize_model.
"""
klass = package['class']
if strict:
model = klass(*package['args'], **package['kwargs'])
else:
sig = inspect.signature(klass)
kw = package['kwargs']
for key in list(kw):
if key not in sig.parameters:
logger.warning("Dropping inexistant parameter %s", key)
del kw[key]
model = klass(*package['args'], **kw)
model.load_state_dict(package['state'])
return model
def copy_state(state):
return {k: v.cpu().clone() for k, v in state.items()}
def serialize_model(model):
args, kwargs = model._init_args_kwargs
state = copy_state(model.state_dict())
return {"class": model.__class__, "args": args, "kwargs": kwargs, "state": state}
@contextmanager
def swap_state(model, state):
"""
Context manager that swaps the state of a model, e.g:
# model is in old state
with swap_state(model, new_state):
# model in new state
# model back to old state
"""
old_state = copy_state(model.state_dict())
model.load_state_dict(state)
try:
yield
finally:
model.load_state_dict(old_state)
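# Illustrative sketch (added): `swap_state` temporarily loads another state
# dict and restores the original one afterwards, even if the block raises.
def _example_swap_state():
    import torch
    model = torch.nn.Linear(2, 2)
    saved = copy_state(model.state_dict())
    zeros = {k: torch.zeros_like(v) for k, v in saved.items()}
    with swap_state(model, zeros):
        assert float(model.weight.abs().sum()) == 0.0     # zeroed inside the block
    assert torch.allclose(model.weight, saved["weight"])  # restored afterwards
    return model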
def pull_metric(history, name):
out = []
for metrics in history:
if name in metrics:
out.append(metrics[name])
return out
class LogProgress:
"""
    Sort of like tqdm, but using log lines rather than real-time updates.
Args:
- logger: logger obtained from `logging.getLogger`,
- iterable: iterable object to wrap
- updates (int): number of lines that will be printed, e.g.
if `updates=5`, log every 1/5th of the total length.
- total (int): length of the iterable, in case it does not support
`len`.
- name (str): prefix to use in the log.
- level: logging level (like `logging.INFO`).
"""
def __init__(self,
logger,
iterable,
updates=5,
total=None,
name="LogProgress",
level=logging.INFO):
self.iterable = iterable
self.total = total or len(iterable)
self.updates = updates
self.name = name
self.logger = logger
self.level = level
def update(self, **infos):
self._infos = infos
def __iter__(self):
self._iterator = iter(self.iterable)
self._index = -1
self._infos = {}
self._begin = time.time()
return self
def __next__(self):
self._index += 1
try:
value = next(self._iterator)
except StopIteration:
raise
else:
return value
finally:
log_every = max(1, self.total // self.updates)
# logging is delayed by 1 it, in order to have the metrics from update
if self._index >= 1 and self._index % log_every == 0:
self._log()
def _log(self):
self._speed = (1 + self._index) / (time.time() - self._begin)
infos = " | ".join(f"{k.capitalize()} {v}" for k, v in self._infos.items())
if self._speed < 1e-4:
speed = "oo sec/it"
elif self._speed < 0.1:
speed = f"{1/self._speed:.1f} sec/it"
else:
speed = f"{self._speed:.1f} it/sec"
out = f"{self.name} | {self._index}/{self.total} | {speed}"
if infos:
out += " | " + infos
self.logger.log(self.level, out)
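# Illustrative sketch (added): wrapping an iterable with `LogProgress`. With
# `updates=2` and ten items, a log line is emitted roughly every five steps,
# carrying whatever was passed to `update()` on the previous iteration.
def _example_log_progress():
    lp = LogProgress(logger, range(10), updates=2, name="demo")
    total = 0
    for step, value in enumerate(lp):
        total += value
        lp.update(loss=f"{1.0 / (step + 1):.3f}")
    return total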
def colorize(text, color):
"""
Display text with some ANSI color in the terminal.
"""
code = f"\033[{color}m"
restore = "\033[0m"
return "".join([code, text, restore])
def bold(text):
"""
Display text in bold in the terminal.
"""
return colorize(text, "1")
def cal_snr(lbl, est):
import torch
y = 10.0 * torch.log10(
torch.sum(lbl**2, dim=-1) / (torch.sum((est-lbl)**2, dim=-1) + EPS) +
EPS
)
return y
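# Illustrative sketch (added): `cal_snr` returns one SNR value (in dB) per
# signal in the batch; a slightly noisy estimate of a random label gives a
# large positive SNR.
def _example_cal_snr():
    import torch
    lbl = torch.randn(3, 100)
    est = lbl + 0.01 * torch.randn(3, 100)
    snr = cal_snr(lbl, est)
    assert snr.shape == (3,)
    return snr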
| 4,770 | 25.954802 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/pretrained.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import logging
import torch.hub
from .demucs import Demucs
from .utils import deserialize_model
logger = logging.getLogger(__name__)
ROOT = "https://dl.fbaipublicfiles.com/adiyoss/denoiser/"
DNS_48_URL = ROOT + "dns48-11decc9d8e3f0998.th"
DNS_64_URL = ROOT + "dns64-a7761ff99a7d5bb6.th"
MASTER_64_URL = ROOT + "master64-8a5dfb4bb92753dd.th"
def _demucs(pretrained, url, **kwargs):
model = Demucs(**kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu')
model.load_state_dict(state_dict)
return model
def dns48(pretrained=True):
return _demucs(pretrained, DNS_48_URL, hidden=48)
def dns64(pretrained=True):
return _demucs(pretrained, DNS_64_URL, hidden=64)
def master64(pretrained=True):
return _demucs(pretrained, MASTER_64_URL, hidden=64)
def add_model_flags(parser):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-m", "--model_path", help="Path to local trained model."
)
group.add_argument(
"--dns48", action="store_true",
help="Use pre-trained real time H=48 model trained on DNS."
)
group.add_argument(
"--dns64", action="store_true",
help="Use pre-trained real time H=64 model trained on DNS."
)
group.add_argument(
"--master64", action="store_true",
help="Use pre-trained real time H=64 model trained on DNS and Valentini."
)
def get_model(args):
"""
Load local model package or torchhub pre-trained model.
"""
if args.model_path:
logger.info("Loading model from %s", args.model_path)
pkg = torch.load(args.model_path)
model = deserialize_model(pkg)
elif args.dns64:
logger.info("Loading pre-trained real time H=64 model trained on DNS.")
model = dns64()
elif args.master64:
logger.info(
"Loading pre-trained real time H=64 model trained on DNS and Valentini."
)
model = master64()
else:
logger.info("Loading pre-trained real time H=48 model trained on DNS.")
model = dns48()
logger.debug(model)
return model
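# Illustrative sketch (added): wiring `add_model_flags` and `get_model`
# together. Selecting one of the pretrained flags downloads weights from the
# URLs above, so this is only meant to be run with network access.
def _example_load_pretrained():
    import argparse
    parser = argparse.ArgumentParser("denoiser.pretrained example")
    add_model_flags(parser)
    args = parser.parse_args(["--dns48"])   # or --dns64 / --master64 / -m <path>
    model = get_model(args)                 # Demucs instance with loaded weights
    return model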
| 2,384 | 28.085366 | 84 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/denoiser/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| 177 | 34.6 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/speaker_embedder/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import librosa
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torchaudio
EMBEDDER_PARAMS = {
'num_mels': 40,
'n_fft': 512,
'emb_dim': 256,
'lstm_hidden': 768,
'lstm_layers': 3,
'window': 80,
'stride': 40,
}
def set_requires_grad(nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary
computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
class LinearNorm(nn.Module):
def __init__(self, hp):
super(LinearNorm, self).__init__()
self.linear_layer = nn.Linear(hp["lstm_hidden"], hp["emb_dim"])
def forward(self, x):
return self.linear_layer(x)
class SpeechEmbedder(nn.Module):
def __init__(self, hp):
super(SpeechEmbedder, self).__init__()
self.lstm = nn.LSTM(hp["num_mels"],
hp["lstm_hidden"],
num_layers=hp["lstm_layers"],
batch_first=True)
self.proj = LinearNorm(hp)
self.hp = hp
def forward(self, mel):
# (num_mels, T) -> (num_mels, T', window)
mels = mel.unfold(1, self.hp["window"], self.hp["stride"])
mels = mels.permute(1, 2, 0) # (T', window, num_mels)
x, _ = self.lstm(mels) # (T', window, lstm_hidden)
x = x[:, -1, :] # (T', lstm_hidden), use last frame only
x = self.proj(x) # (T', emb_dim)
x = x / torch.norm(x, p=2, dim=1, keepdim=True) # (T', emb_dim)
x = x.mean(dim=0)
if x.norm(p=2) != 0:
x = x / x.norm(p=2)
return x
class SpkrEmbedder(nn.Module):
RATE = 16000
def __init__(
self,
embedder_path,
embedder_params=EMBEDDER_PARAMS,
rate=16000,
hop_length=160,
win_length=400,
pad=False,
):
super(SpkrEmbedder, self).__init__()
embedder_pt = torch.load(embedder_path, map_location="cpu")
self.embedder = SpeechEmbedder(embedder_params)
self.embedder.load_state_dict(embedder_pt)
self.embedder.eval()
set_requires_grad(self.embedder, requires_grad=False)
self.embedder_params = embedder_params
self.register_buffer('mel_basis', torch.from_numpy(
librosa.filters.mel(
sr=self.RATE,
n_fft=self.embedder_params["n_fft"],
n_mels=self.embedder_params["num_mels"])
)
)
self.resample = None
if rate != self.RATE:
self.resample = torchaudio.transforms.Resample(rate, self.RATE)
self.hop_length = hop_length
self.win_length = win_length
self.pad = pad
def get_mel(self, y):
if self.pad and y.shape[-1] < 14000:
y = F.pad(y, (0, 14000 - y.shape[-1]))
window = torch.hann_window(self.win_length).to(y)
y = torch.stft(y, n_fft=self.embedder_params["n_fft"],
hop_length=self.hop_length,
win_length=self.win_length,
window=window)
magnitudes = torch.norm(y, dim=-1, p=2) ** 2
mel = torch.log10(self.mel_basis @ magnitudes + 1e-6)
return mel
def forward(self, inputs):
dvecs = []
for wav in inputs:
mel = self.get_mel(wav)
if mel.dim() == 3:
mel = mel.squeeze(0)
dvecs += [self.embedder(mel)]
dvecs = torch.stack(dvecs)
dvec = torch.mean(dvecs, dim=0)
dvec = dvec / torch.norm(dvec)
return dvec
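# Illustrative sketch (added): extracting a speaker d-vector with the module
# above. "/path/to/embedder.pt" is a hypothetical checkpoint path, and the
# random waveforms stand in for real 16 kHz utterances of one speaker.
def _example_speaker_dvector():
    embedder = SpkrEmbedder("/path/to/embedder.pt", rate=16000)
    utterances = [torch.randn(32000), torch.randn(48000)]   # ~2 s and ~3 s
    with torch.no_grad():
        dvec = embedder(utterances)   # unit-norm speaker embedding
    assert dvec.shape == (EMBEDDER_PARAMS["emb_dim"],)
    return dvec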
| 4,103 | 29.176471 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/speech_synthesis/preprocessing/vad/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import contextlib
import wave
try:
import webrtcvad
except ImportError:
raise ImportError("Please install py-webrtcvad: pip install webrtcvad")
import argparse
import os
import logging
from tqdm import tqdm
AUDIO_SUFFIX = '.wav'
FS_MS = 30
SCALE = 6e-5
THRESHOLD = 0.3
def read_wave(path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate).
"""
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (8000, 16000, 32000, 48000)
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate
def write_wave(path, audio, sample_rate):
"""Writes a .wav file.
Takes path, PCM audio data, and sample rate.
"""
with contextlib.closing(wave.open(path, 'wb')) as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(sample_rate)
wf.writeframes(audio)
class Frame(object):
"""Represents a "frame" of audio data."""
def __init__(self, bytes, timestamp, duration):
self.bytes = bytes
self.timestamp = timestamp
self.duration = duration
def frame_generator(frame_duration_ms, audio, sample_rate):
"""Generates audio frames from PCM audio data.
Takes the desired frame duration in milliseconds, the PCM data, and
the sample rate.
Yields Frames of the requested duration.
"""
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0
while offset + n < len(audio):
yield Frame(audio[offset:offset + n], timestamp, duration)
timestamp += duration
offset += n
def vad_collector(sample_rate, frame_duration_ms,
padding_duration_ms, vad, frames):
"""Filters out non-voiced audio frames.
Given a webrtcvad.Vad and a source of audio frames, yields only
the voiced audio.
Uses a padded, sliding window algorithm over the audio frames.
When more than 90% of the frames in the window are voiced (as
reported by the VAD), the collector triggers and begins yielding
audio frames. Then the collector waits until 90% of the frames in
the window are unvoiced to detrigger.
The window is padded at the front and back to provide a small
amount of silence or the beginnings/endings of speech around the
voiced frames.
Arguments:
sample_rate - The audio sample rate, in Hz.
frame_duration_ms - The frame duration in milliseconds.
padding_duration_ms - The amount to pad the window, in milliseconds.
vad - An instance of webrtcvad.Vad.
frames - a source of audio frames (sequence or generator).
Returns: A generator that yields PCM audio data.
"""
num_padding_frames = int(padding_duration_ms / frame_duration_ms)
# We use a deque for our sliding window/ring buffer.
ring_buffer = collections.deque(maxlen=num_padding_frames)
# We have two states: TRIGGERED and NOTTRIGGERED. We start in the
# NOTTRIGGERED state.
triggered = False
voiced_frames = []
for frame in frames:
is_speech = vad.is_speech(frame.bytes, sample_rate)
# sys.stdout.write('1' if is_speech else '0')
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
# If we're NOTTRIGGERED and more than 90% of the frames in
# the ring buffer are voiced frames, then enter the
# TRIGGERED state.
if num_voiced > 0.9 * ring_buffer.maxlen:
triggered = True
# We want to yield all the audio we see from now until
# we are NOTTRIGGERED, but we have to start with the
# audio that's already in the ring buffer.
for f, _ in ring_buffer:
voiced_frames.append(f)
ring_buffer.clear()
else:
# We're in the TRIGGERED state, so collect the audio data
# and add it to the ring buffer.
voiced_frames.append(frame)
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
# If more than 90% of the frames in the ring buffer are
# unvoiced, then enter NOTTRIGGERED and yield whatever
# audio we've collected.
if num_unvoiced > 0.9 * ring_buffer.maxlen:
triggered = False
yield [b''.join([f.bytes for f in voiced_frames]),
voiced_frames[0].timestamp, voiced_frames[-1].timestamp]
ring_buffer.clear()
voiced_frames = []
# If we have any leftover voiced audio when we run out of input,
# yield it.
if voiced_frames:
yield [b''.join([f.bytes for f in voiced_frames]),
voiced_frames[0].timestamp, voiced_frames[-1].timestamp]
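# Illustrative sketch (added): running the helpers above end to end on a
# single file. "noisy.wav" is a placeholder path for a mono 16-bit 16 kHz wav;
# aggressiveness 3 matches the default used by main() below.
def _example_vad_single_file(wav_path="noisy.wav"):
    vad = webrtcvad.Vad(3)
    audio, sample_rate = read_wave(wav_path)
    frames = list(frame_generator(FS_MS, audio, sample_rate))
    segments = vad_collector(sample_rate, FS_MS, 300, vad, frames)
    # each segment is [voiced PCM bytes, start timestamp, end timestamp]
    return [(start, end) for _, start, end in segments]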
def main(args):
# create output folder
try:
cmd = f"mkdir -p {args.out_path}"
os.system(cmd)
except Exception:
logging.error("Can not create output folder")
exit(-1)
# build vad object
vad = webrtcvad.Vad(int(args.agg))
# iterating over wavs in dir
for file in tqdm(os.listdir(args.in_path)):
if file.endswith(AUDIO_SUFFIX):
audio_inpath = os.path.join(args.in_path, file)
audio_outpath = os.path.join(args.out_path, file)
audio, sample_rate = read_wave(audio_inpath)
frames = frame_generator(FS_MS, audio, sample_rate)
frames = list(frames)
segments = vad_collector(sample_rate, FS_MS, 300, vad, frames)
merge_segments = list()
timestamp_start = 0.0
timestamp_end = 0.0
            # removing start, end, and long sequences of silence
for i, segment in enumerate(segments):
merge_segments.append(segment[0])
if i and timestamp_start:
sil_duration = segment[1] - timestamp_end
if sil_duration > THRESHOLD:
merge_segments.append(int(THRESHOLD / SCALE)*(b'\x00'))
else:
merge_segments.append(int((sil_duration / SCALE))*(b'\x00'))
timestamp_start = segment[1]
timestamp_end = segment[2]
segment = b''.join(merge_segments)
write_wave(audio_outpath, segment, sample_rate)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Apply VAD to a folder of files.')
parser.add_argument('in_path', type=str, help='Path to the input files')
parser.add_argument('out_path', type=str,
help='Path to save the processed files')
parser.add_argument('--agg', type=int, default=3,
help='The level of aggressiveness of the VAD: [0-3]')
args = parser.parse_args()
main(args)
| 7,372 | 37.202073 | 84 |
py
|
sign-topic
|
sign-topic-main/examples/byte_level_bpe/get_bitext.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import os.path as op
from collections import namedtuple
from multiprocessing import cpu_count
from typing import List, Optional
import sentencepiece as sp
from fairseq.data.encoders.byte_bpe import ByteBPE
from fairseq.data.encoders.byte_utils import byte_encode
from fairseq.data.encoders.bytes import Bytes
from fairseq.data.encoders.characters import Characters
from fairseq.data.encoders.moses_tokenizer import MosesTokenizer
from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE
SPLITS = ["train", "valid", "test"]
def _convert_xml(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
ss = s.strip()
if not ss.startswith("<seg"):
continue
ss = ss.replace("</seg>", "").split('">')
assert len(ss) == 2
f_o.write(ss[1].strip() + "\n")
def _convert_train(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
ss = s.strip()
if ss.startswith("<"):
continue
f_o.write(ss.strip() + "\n")
def _get_bytes(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(Bytes.encode(s.strip()) + "\n")
def _get_chars(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(Characters.encode(s.strip()) + "\n")
def pretokenize(in_path: str, out_path: str, src: str, tgt: str):
Args = namedtuple(
"Args",
[
"moses_source_lang",
"moses_target_lang",
"moses_no_dash_splits",
"moses_no_escape",
],
)
args = Args(
moses_source_lang=src,
moses_target_lang=tgt,
moses_no_dash_splits=False,
moses_no_escape=False,
)
pretokenizer = MosesTokenizer(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(pretokenizer.encode(s.strip()) + "\n")
def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str):
with open(out_path, "w") as f_o:
for lang in [src, tgt]:
with open(f"{in_path_prefix}.{lang}") as f:
for s in f:
f_o.write(byte_encode(s.strip()) + "\n")
def _get_bpe(in_path: str, model_prefix: str, vocab_size: int):
arguments = [
f"--input={in_path}",
f"--model_prefix={model_prefix}",
f"--model_type=bpe",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
"--normalization_rule_name=identity",
f"--num_threads={cpu_count()}",
]
sp.SentencePieceTrainer.Train(" ".join(arguments))
def _apply_bbpe(model_path: str, in_path: str, out_path: str):
Args = namedtuple("Args", ["sentencepiece_model_path"])
args = Args(sentencepiece_model_path=model_path)
tokenizer = ByteBPE(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(tokenizer.encode(s.strip()) + "\n")
def _apply_bpe(model_path: str, in_path: str, out_path: str):
Args = namedtuple("Args", ["sentencepiece_model"])
args = Args(sentencepiece_model=model_path)
tokenizer = SentencepieceBPE(args)
with open(in_path) as f, open(out_path, "w") as f_o:
for s in f:
f_o.write(tokenizer.encode(s.strip()) + "\n")
def _concat_files(in_paths: List[str], out_path: str):
with open(out_path, "w") as f_o:
for p in in_paths:
with open(p) as f:
for r in f:
f_o.write(r)
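# Illustrative sketch (added): the BPE train/apply round trip that
# `preprocess_iwslt17` below performs, shown on hypothetical file paths.
# `_get_bpe` trains a SentencePiece BPE model on the concatenated bitext and
# `_apply_bpe` re-tokenizes a split with it.
def _example_bpe_roundtrip(root="data", bpe_size=4000):
    concatenated = op.join(root, "train.all")
    _concat_files(
        [op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")],
        concatenated,
    )
    model_prefix = op.join(root, f"spm_bpe{bpe_size}")
    _get_bpe(concatenated, model_prefix, bpe_size)
    _apply_bpe(
        model_prefix + ".model",
        op.join(root, "valid.moses.fr"),
        op.join(root, f"valid.moses.bpe{bpe_size}.fr"),
    )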
def preprocess_iwslt17(
root: str,
src: str,
tgt: str,
bpe_size: Optional[int],
need_chars: bool,
bbpe_size: Optional[int],
need_bytes: bool,
):
# extract bitext
in_root = op.join(root, f"{src}-{tgt}")
for lang in [src, tgt]:
_convert_train(
op.join(in_root, f"train.tags.{src}-{tgt}.{lang}"),
op.join(root, f"train.{lang}"),
)
_convert_xml(
op.join(in_root, f"IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml"),
op.join(root, f"valid.{lang}"),
)
_convert_xml(
op.join(in_root, f"IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml"),
op.join(root, f"test.{lang}"),
)
# pre-tokenize
for lang in [src, tgt]:
for split in SPLITS:
pretokenize(
op.join(root, f"{split}.{lang}"),
op.join(root, f"{split}.moses.{lang}"),
src,
tgt,
)
# tokenize with BPE vocabulary
if bpe_size is not None:
# learn vocabulary
concated_train_path = op.join(root, "train.all")
_concat_files(
[op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")],
concated_train_path,
)
bpe_model_prefix = op.join(root, f"spm_bpe{bpe_size}")
_get_bpe(concated_train_path, bpe_model_prefix, bpe_size)
os.remove(concated_train_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bpe(
bpe_model_prefix + ".model",
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bpe{bpe_size}.{lang}"),
)
# tokenize with bytes vocabulary
if need_bytes:
for lang in [src, tgt]:
for split in SPLITS:
_get_bytes(
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bytes.{lang}"),
)
# tokenize with characters vocabulary
if need_chars:
for lang in [src, tgt]:
for split in SPLITS:
_get_chars(
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.chars.{lang}"),
)
# tokenize with byte-level BPE vocabulary
if bbpe_size is not None:
# learn vocabulary
bchar_path = op.join(root, "train.bchar")
_convert_to_bchar(op.join(root, "train.moses"), src, tgt, bchar_path)
bbpe_model_prefix = op.join(root, f"spm_bbpe{bbpe_size}")
_get_bpe(bchar_path, bbpe_model_prefix, bbpe_size)
os.remove(bchar_path)
# apply
for lang in [src, tgt]:
for split in SPLITS:
_apply_bbpe(
bbpe_model_prefix + ".model",
op.join(root, f"{split}.moses.{lang}"),
op.join(root, f"{split}.moses.bbpe{bbpe_size}.{lang}"),
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--root", type=str, default="data")
parser.add_argument(
"--bpe-vocab",
default=None,
type=int,
help="Generate tokenized bitext with BPE of size K."
"Default to None (disabled).",
)
parser.add_argument(
"--bbpe-vocab",
default=None,
type=int,
help="Generate tokenized bitext with BBPE of size K."
"Default to None (disabled).",
)
parser.add_argument(
"--byte-vocab",
action="store_true",
help="Generate tokenized bitext with bytes vocabulary",
)
parser.add_argument(
"--char-vocab",
action="store_true",
help="Generate tokenized bitext with chars vocabulary",
)
args = parser.parse_args()
preprocess_iwslt17(
args.root,
"fr",
"en",
args.bpe_vocab,
args.char_vocab,
args.bbpe_vocab,
args.byte_vocab,
)
if __name__ == "__main__":
main()
| 7,993 | 30.34902 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/byte_level_bpe/gru_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerEncoder, TransformerModel
@register_model("gru_transformer")
class GRUTransformerModel(TransformerModel):
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return GRUTransformerEncoder(args, src_dict, embed_tokens)
class GRUTransformerEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
self.emb_ctx = nn.GRU(
input_size=embed_tokens.embedding_dim,
hidden_size=embed_tokens.embedding_dim // 2,
num_layers=1,
bidirectional=True,
)
def forward_embedding(self, src_tokens):
# embed tokens and positions
x = embed = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
# contextualize embeddings
x = x.transpose(0, 1)
x = self.dropout_module(x)
x, _ = self.emb_ctx.forward(x)
x = x.transpose(0, 1)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
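# Illustrative sketch (added): the embedding-contextualisation idea used by
# `forward_embedding` above, in isolation. A bidirectional GRU with hidden
# size emb_dim // 2 keeps the output dimensionality equal to the input
# embedding dimensionality.
def _example_gru_contextualisation(emb_dim=512, seq_len=7, bsz=3):
    import torch
    gru = nn.GRU(input_size=emb_dim, hidden_size=emb_dim // 2,
                 num_layers=1, bidirectional=True)
    x = torch.rand(seq_len, bsz, emb_dim)      # (T, B, C), as in forward_embedding
    y, _ = gru(x)
    assert y.shape == (seq_len, bsz, emb_dim)  # 2 directions x (emb_dim // 2)
    return y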
@register_model_architecture("gru_transformer", "gru_transformer")
def gru_transformer_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.layer_wise_attention = getattr(args, "layer_wise_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
@register_model_architecture("gru_transformer", "gru_transformer_big")
def gru_transformer_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
gru_transformer_base_architecture(args)
| 5,027 | 45.555556 | 87 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import models # noqa
| 207 | 28.714286 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/modules/monotonic_multihead_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch import Tensor
import torch.nn as nn
from examples.simultaneous_translation.utils.p_choose_strategy import (
learnable_p_choose,
waitk_p_choose
)
from examples.simultaneous_translation.utils.monotonic_attention import (
expected_alignment_from_p_choose,
expected_soft_attention,
mass_preservation,
)
from fairseq.modules import MultiheadAttention
from . import register_monotonic_attention
from typing import Dict, Optional
@register_monotonic_attention("hard_aligned")
class MonotonicAttention(MultiheadAttention):
"""
Abstract class of monotonic attentions
"""
k_in_proj: Dict[str, nn.Linear]
q_in_proj: Dict[str, nn.Linear]
def __init__(self, args):
super().__init__(
embed_dim=args.decoder_embed_dim,
num_heads=args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
)
self.soft_attention = False
self.eps = getattr(args, "attention_eps", True)
self.mass_preservation = getattr(args, "mass_preservation", True)
self.noise_type = args.noise_type
self.noise_mean = args.noise_mean
self.noise_var = args.noise_var
self.energy_bias_init = args.energy_bias_init
self.energy_bias = (
nn.Parameter(self.energy_bias_init * torch.ones([1]))
if args.energy_bias is True
else 0
)
self.k_in_proj = {"monotonic": self.k_proj}
self.q_in_proj = {"monotonic": self.q_proj}
self.chunk_size = None
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--no-mass-preservation', action="store_false",
dest="mass_preservation",
help='Do not stay on the last token when decoding')
parser.add_argument('--mass-preservation', action="store_true",
dest="mass_preservation",
help='Stay on the last token when decoding')
parser.set_defaults(mass_preservation=True)
parser.add_argument('--noise-var', type=float, default=1.0,
                            help='Variance of discreteness noise')
        parser.add_argument('--noise-mean', type=float, default=0.0,
                            help='Mean of discreteness noise')
        parser.add_argument('--noise-type', type=str, default="flat",
                            help='Type of discreteness noise')
parser.add_argument('--energy-bias', action="store_true",
default=False,
help='Bias for energy')
parser.add_argument('--energy-bias-init', type=float, default=-2.0,
help='Initial value of the bias for energy')
parser.add_argument('--attention-eps', type=float, default=1e-6,
help='Epsilon when calculating expected attention')
def energy_from_qk(
self,
query: Tensor,
key: Tensor,
energy_type: str,
key_padding_mask: Optional[Tensor] = None,
bias: int = 0
):
"""
        Compute the attention energy from query and key.
        query size: tgt_len, bsz, embed_dim
        key size: src_len, bsz, embed_dim
        key_padding_mask size: bsz, src_len
        bias: scalar energy bias added before masking
"""
length, bsz, _ = query.size()
q = self.q_in_proj[energy_type].forward(query)
q = (
q.contiguous()
.view(length, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
q = q * self.scaling
length, bsz, _ = key.size()
k = self.k_in_proj[energy_type].forward(key)
k = (
k.contiguous()
.view(length, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
energy = torch.bmm(q, k.transpose(1, 2)) + bias
if key_padding_mask is not None:
energy = energy.masked_fill(
key_padding_mask.unsqueeze(1).to(torch.bool),
- float("inf")
)
return energy
def p_choose_from_qk(self, query, key, key_padding_mask, incremental_states=None):
monotonic_energy = self.energy_from_qk(
query,
key,
"monotonic",
key_padding_mask=key_padding_mask,
bias=self.energy_bias,
)
p_choose = learnable_p_choose(
monotonic_energy,
self.noise_mean,
self.noise_var,
self.training
)
return p_choose
def p_choose(self, query, key, key_padding_mask, incremental_states=None):
        return self.p_choose_from_qk(query, key, key_padding_mask, incremental_states)
def monotonic_attention_process_infer(
self,
query: Optional[Tensor],
key: Optional[Tensor],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
):
"""
Monotonic attention at inference time
Notice that this function is designed for simuleval not sequence_generator
"""
assert query is not None
assert key is not None
if query.size(1) != 1:
raise RuntimeError(
"Simultaneous translation models don't support batch decoding."
)
# 1. compute stepwise probability
p_choose = self.p_choose(
query, key, None, incremental_state
).squeeze(1)
# 2. Compute the alpha
src_len = key.size(0)
# Maximum steps allows in this iteration
max_steps = src_len - 1 if self.mass_preservation else src_len
monotonic_cache = self._get_monotonic_buffer(incremental_state)
# Step for each head
monotonic_step = monotonic_cache.get(
'head_step',
p_choose.new_zeros(1, self.num_heads).long()
)
assert monotonic_step is not None
finish_read = monotonic_step.eq(max_steps)
p_choose_i = torch.tensor(1)
while finish_read.sum().item() < self.num_heads:
# p_choose: self.num_heads, src_len
# only choose the p at monotonic steps
# p_choose_i: 1, self.num_heads
p_choose_i = (
p_choose.gather(
1,
monotonic_step
.clamp(0, src_len - 1),
)
)
read_one_step = (
(p_choose_i < 0.5)
.type_as(monotonic_step)
.masked_fill(finish_read, 0)
)
# 1 x bsz
# sample actions on unfinished seq
# 0 means stay, finish reading
# 1 means leave, continue reading
monotonic_step += read_one_step
finish_read = monotonic_step.eq(max_steps) | (read_one_step == 0)
# p_choose at last steps
p_choose_i = (
p_choose.gather(
1,
monotonic_step
.clamp(0, src_len - 1),
)
)
monotonic_cache["head_step"] = monotonic_step
# Whether a head is looking for new input
monotonic_cache["head_read"] = (
monotonic_step.eq(max_steps) & (p_choose_i < 0.5)
)
self._set_monotonic_buffer(incremental_state, monotonic_cache)
        # 3. Update alpha
alpha = (
p_choose
.new_zeros([self.num_heads, src_len])
.scatter(
1,
(monotonic_step)
.view(self.num_heads, 1).clamp(0, src_len - 1),
1
)
)
if not self.mass_preservation:
alpha = alpha.masked_fill(
(monotonic_step == max_steps)
.view(self.num_heads, 1),
0
)
# 4. Compute Beta
if self.soft_attention:
monotonic_step = monotonic_step.t()
beta_mask = torch.arange(src_len).expand_as(alpha).gt(monotonic_step).unsqueeze(1)
# If it's soft attention just do softmax on current context
soft_energy = self.energy_from_qk(
query,
key,
"soft"
)
beta = torch.nn.functional.softmax(
soft_energy.masked_fill(beta_mask, -float("inf")), dim=-1
)
# It could happen that a head doesn't move at all
beta = beta.masked_fill(monotonic_step.eq(0).unsqueeze(1), 0)
else:
# If it's hard attention just select the last state
beta = alpha
return p_choose, alpha, beta
def monotonic_attention_process_train(
self,
query: Optional[Tensor],
key: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
):
"""
Calculating monotonic attention process for training
Including:
stepwise probability: p_choose
expected hard alignment: alpha
expected soft attention: beta
"""
assert query is not None
assert key is not None
# 1. compute stepwise probability
p_choose = self.p_choose_from_qk(query, key, key_padding_mask)
# 2. compute expected_alignment
alpha = expected_alignment_from_p_choose(
p_choose,
key_padding_mask,
eps=self.eps,
)
if self.mass_preservation:
alpha = mass_preservation(
alpha, key_padding_mask
)
# 3. compute expected soft attention (soft aligned model only)
if self.soft_attention:
soft_energy = self.energy_from_qk(
query,
key,
"soft",
key_padding_mask=None,
)
beta = expected_soft_attention(
alpha,
soft_energy,
padding_mask=key_padding_mask,
chunk_size=self.chunk_size,
eps=self.eps,
)
else:
beta = alpha
soft_energy = alpha
return p_choose, alpha, beta, soft_energy
def forward(
self,
query: Optional[Tensor],
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
attn_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True, static_kv: bool = False, need_head_weights: bool = False,
):
"""
query: tgt_len, bsz, embed_dim
key: src_len, bsz, embed_dim
value: src_len, bsz, embed_dim
"""
assert attn_mask is None
assert query is not None
assert key is not None
assert value is not None
tgt_len, bsz, embed_dim = query.size()
src_len = value.size(0)
if key_padding_mask is not None:
assert not key_padding_mask[:, 0].any(), (
"Only right padding is supported."
)
key_padding_mask = (
key_padding_mask
.unsqueeze(1)
.expand([bsz, self.num_heads, src_len])
.contiguous()
.view(-1, src_len)
)
if incremental_state is not None:
# Inference
(
p_choose, alpha, beta
) = self.monotonic_attention_process_infer(
query, key, incremental_state
)
soft_energy = beta
else:
# Train
(
p_choose, alpha, beta, soft_energy
) = self.monotonic_attention_process_train(
query, key, key_padding_mask
)
v = self.v_proj(value)
length, bsz, _ = v.size()
v = (
v.contiguous()
.view(length, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
attn = torch.bmm(beta.type_as(v), v)
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
p_choose = p_choose.view(bsz, self.num_heads, tgt_len, src_len)
alpha = alpha.view(bsz, self.num_heads, tgt_len, src_len)
beta = beta.view(bsz, self.num_heads, tgt_len, src_len)
return attn, {
"p_choose": p_choose,
"alpha": alpha,
"beta": beta,
}
def _get_monotonic_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]):
maybe_incremental_state = self.get_incremental_state(
incremental_state,
'monotonic',
)
if maybe_incremental_state is None:
typed_empty_dict: Dict[str, Optional[Tensor]] = {}
return typed_empty_dict
else:
return maybe_incremental_state
def _set_monotonic_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], buffer: Dict[str, Optional[Tensor]]):
self.set_incremental_state(
incremental_state,
'monotonic',
buffer,
)
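# Illustrative sketch (added): a slow, unvectorised reference for the expected
# hard alignment that `expected_alignment_from_p_choose` computes (numerical
# clamping and padding handling omitted). Following Raffel et al. (2017):
#   alpha[i, j] = p[i, j] * sum_{k <= j} alpha[i-1, k] * prod_{l=k}^{j-1} (1 - p[i, l])
# with the "previous" alignment initialised as a one-hot on source position 0.
# Here p_choose is a single (tgt_len, src_len) matrix rather than the batched
# (bsz * num_heads, tgt_len, src_len) tensor used above.
def _reference_expected_alignment(p_choose: Tensor) -> Tensor:
    tgt_len, src_len = p_choose.size()
    alpha = p_choose.new_zeros(tgt_len, src_len)
    prev = p_choose.new_zeros(src_len)
    prev[0] = 1.0
    for i in range(tgt_len):
        for j in range(src_len):
            total = 0.0
            for k in range(j + 1):
                stay = 1.0
                for l in range(k, j):
                    stay = stay * (1 - p_choose[i, l])
                total = total + prev[k] * stay
            alpha[i, j] = p_choose[i, j] * total
        prev = alpha[i]
    return alpha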
@register_monotonic_attention("infinite_lookback")
class MonotonicInfiniteLookbackAttention(
MonotonicAttention
):
def __init__(self, args):
super().__init__(args)
self.soft_attention = True
self.init_soft_attention()
def init_soft_attention(self):
self.k_proj_soft = nn.Linear(self.kdim, self.embed_dim, bias=True)
self.q_proj_soft = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
self.k_in_proj["soft"] = self.k_proj_soft
self.q_in_proj["soft"] = self.q_proj_soft
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(
self.k_in_proj["soft"].weight, gain=1 / math.sqrt(2)
)
nn.init.xavier_uniform_(
self.q_in_proj["soft"].weight, gain=1 / math.sqrt(2)
)
else:
nn.init.xavier_uniform_(self.k_in_proj["soft"].weight)
nn.init.xavier_uniform_(self.q_in_proj["soft"].weight)
@register_monotonic_attention("waitk")
class WaitKAttention(
MonotonicInfiniteLookbackAttention
):
"""
STACL: Simultaneous Translation with Implicit Anticipation and
Controllable Latency using Prefix-to-Prefix Framework
https://www.aclweb.org/anthology/P19-1289/
"""
def __init__(self, args):
super().__init__(args)
self.q_in_proj["soft"] = self.q_in_proj["monotonic"]
self.k_in_proj["soft"] = self.k_in_proj["monotonic"]
self.waitk_lagging = args.waitk_lagging
assert self.waitk_lagging > 0, (
f"Lagging has to been larger than 0, get {self.waitk_lagging}."
)
@staticmethod
def add_args(parser):
super(
MonotonicInfiniteLookbackAttention,
MonotonicInfiniteLookbackAttention
).add_args(parser)
parser.add_argument(
"--waitk-lagging", type=int, required=True, help="Wait K lagging"
)
def p_choose_from_qk(
self,
query: Optional[Tensor],
key: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
assert query is not None
assert key is not None
p_choose = waitk_p_choose(
tgt_len=query.size(0),
src_len=key.size(0),
bsz=query.size(1) * self.num_heads,
waitk_lagging=self.waitk_lagging,
key_padding_mask=key_padding_mask,
incremental_state=incremental_state,
)
return p_choose.to(query)
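# Illustrative sketch (added): the read/write schedule that wait-k induces,
# shown as a plain 0/1 matrix. This is a rough standalone illustration, not
# the library's `waitk_p_choose` helper: target step i (0-based) writes after
# reading the first i + k source tokens, so each row is one-hot at source
# index min(i + k - 1, src_len - 1).
def _example_waitk_schedule(tgt_len=4, src_len=6, k=2):
    p_choose = torch.zeros(tgt_len, src_len)
    for i in range(tgt_len):
        p_choose[i, min(i + k - 1, src_len - 1)] = 1.0
    # k=2 -> [[0, 1, 0, 0, 0, 0],
    #         [0, 0, 1, 0, 0, 0],
    #         [0, 0, 0, 1, 0, 0],
    #         [0, 0, 0, 0, 1, 0]]
    return p_choose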
@register_monotonic_attention("chunkwise")
class ChunkwiseAttention(
MonotonicInfiniteLookbackAttention
):
def __init__(self, args):
super().__init__(args)
self.chunk_size = args.mocha_chunk_size
assert self.chunk_size > 1
@staticmethod
def add_args(parser):
        super(
            MonotonicInfiniteLookbackAttention,
            MonotonicInfiniteLookbackAttention,
        ).add_args(parser)
parser.add_argument(
"--mocha-chunk-size", type=int,
required=True, help="Mocha chunk size"
)
| 16,858 | 31.421154 | 142 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
from . import build_monotonic_attention
from typing import Dict, Optional, List
from torch import Tensor
import torch
class TransformerMonotonicEncoderLayer(TransformerEncoderLayer):
def forward(self, x, encoder_padding_mask):
seq_len, _, _ = x.size()
attn_mask = x.new_ones([seq_len, seq_len]).triu(1)
attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf"))
return super().forward(x, encoder_padding_mask, attn_mask)
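# Illustrative sketch (added): the causal attention mask constructed in
# `forward` above, on a toy length-4 sequence. Position i may only attend to
# positions <= i, which is what makes the encoder usable on streaming input.
def _example_monotonic_encoder_mask(seq_len: int = 4) -> Tensor:
    mask = torch.ones(seq_len, seq_len).triu(1)
    mask = mask.masked_fill(mask.bool(), float("-inf"))
    # [[0., -inf, -inf, -inf],
    #  [0.,   0., -inf, -inf],
    #  [0.,   0.,   0., -inf],
    #  [0.,   0.,   0.,   0.]]
    return mask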
class TransformerMonotonicDecoderLayer(TransformerDecoderLayer):
def __init__(self, args):
super().__init__(args)
assert args.simul_type is not None, "A --simul-type is needed."
self.encoder_attn = build_monotonic_attention(args)
def prune_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
):
input_buffer = self.self_attn._get_input_buffer(incremental_state)
for key in ["prev_key", "prev_value"]:
input_buffer_key = input_buffer[key]
assert input_buffer_key is not None
if input_buffer_key.size(2) > 1:
input_buffer[key] = input_buffer_key[:, :, :-1, :]
else:
typed_empty_dict: Dict[str, Optional[Tensor]] = {}
input_buffer = typed_empty_dict
break
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, input_buffer)
def forward(
self,
x,
encoder_out: Optional[Tensor] = None,
encoder_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[Tensor]] = None,
prev_attn_state: Optional[List[Tensor]] = None,
self_attn_mask: Optional[Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
assert self.encoder_attn is not None
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
| 7,265 | 38.704918 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/modules/fixed_pre_decision.py
|
from functools import partial
import torch
from torch import Tensor
import math
import torch.nn.functional as F
from . import register_monotonic_attention
from .monotonic_multihead_attention import (
MonotonicAttention,
MonotonicInfiniteLookbackAttention,
WaitKAttention
)
from typing import Dict, Optional
def fixed_pooling_monotonic_attention(monotonic_attention):
def create_model(monotonic_attention, klass):
class FixedStrideMonotonicAttention(monotonic_attention):
def __init__(self, args):
self.waitk_lagging = 0
self.num_heads = 0
self.noise_mean = 0.0
self.noise_var = 0.0
super().__init__(args)
self.pre_decision_type = args.fixed_pre_decision_type
self.pre_decision_ratio = args.fixed_pre_decision_ratio
self.pre_decision_pad_threshold = args.fixed_pre_decision_pad_threshold
assert self.pre_decision_ratio > 1
if args.fixed_pre_decision_type == "average":
self.pooling_layer = torch.nn.AvgPool1d(
kernel_size=self.pre_decision_ratio,
stride=self.pre_decision_ratio,
ceil_mode=True,
)
elif args.fixed_pre_decision_type == "last":
def last(key):
if key.size(2) < self.pre_decision_ratio:
return key
else:
k = key[
:,
:,
self.pre_decision_ratio - 1:: self.pre_decision_ratio,
].contiguous()
if key.size(-1) % self.pre_decision_ratio != 0:
k = torch.cat([k, key[:, :, -1:]], dim=-1).contiguous()
return k
self.pooling_layer = last
else:
raise NotImplementedError
@staticmethod
def add_args(parser):
super(
FixedStrideMonotonicAttention, FixedStrideMonotonicAttention
).add_args(parser)
parser.add_argument(
"--fixed-pre-decision-ratio",
type=int,
required=True,
help=(
"Ratio for the fixed pre-decision,"
"indicating how many encoder steps will start"
"simultaneous decision making process."
),
)
parser.add_argument(
"--fixed-pre-decision-type",
default="average",
choices=["average", "last"],
help="Pooling type",
)
parser.add_argument(
"--fixed-pre-decision-pad-threshold",
type=float,
default=0.3,
help="If a part of the sequence has pad"
",the threshold the pooled part is a pad.",
)
def insert_zeros(self, x):
bsz_num_heads, tgt_len, src_len = x.size()
stride = self.pre_decision_ratio
weight = F.pad(torch.ones(1, 1, 1).to(x), (stride - 1, 0))
x_upsample = F.conv_transpose1d(
x.view(-1, src_len).unsqueeze(1),
weight,
stride=stride,
padding=0,
)
return x_upsample.squeeze(1).view(bsz_num_heads, tgt_len, -1)
def p_choose(
self,
query: Optional[Tensor],
key: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
assert key is not None
assert query is not None
src_len = key.size(0)
tgt_len = query.size(0)
batch_size = query.size(1)
key_pool = self.pooling_layer(key.transpose(0, 2)).transpose(0, 2)
if key_padding_mask is not None:
key_padding_mask_pool = (
self.pooling_layer(key_padding_mask.unsqueeze(0).float())
.squeeze(0)
.gt(self.pre_decision_pad_threshold)
)
# Make sure at least one element is not pad
key_padding_mask_pool[:, 0] = 0
else:
key_padding_mask_pool = None
if incremental_state is not None:
                    # Floor (instead of ceil) is used at inference time,
                    # but make sure key_pool has length at least 1
if (
max(1, math.floor(key.size(0) / self.pre_decision_ratio))
) < key_pool.size(0):
key_pool = key_pool[:-1]
if key_padding_mask_pool is not None:
key_padding_mask_pool = key_padding_mask_pool[:-1]
p_choose_pooled = self.p_choose_from_qk(
query,
key_pool,
key_padding_mask_pool,
incremental_state=incremental_state,
)
# Upsample, interpolate zeros
p_choose = self.insert_zeros(p_choose_pooled)
if p_choose.size(-1) < src_len:
# Append zeros if the upsampled p_choose is shorter than src_len
p_choose = torch.cat(
[
p_choose,
torch.zeros(
p_choose.size(0),
tgt_len,
src_len - p_choose.size(-1)
).to(p_choose)
],
dim=2
)
else:
                    # The upsampled p_choose can be longer than src_len because
                    # ceil_mode was used in pooling; truncate it.
p_choose = p_choose[:, :, :src_len]
p_choose[:, :, -1] = p_choose_pooled[:, :, -1]
assert list(p_choose.size()) == [
batch_size * self.num_heads,
tgt_len,
src_len,
]
return p_choose
FixedStrideMonotonicAttention.__name__ = klass.__name__
return FixedStrideMonotonicAttention
return partial(create_model, monotonic_attention)
@register_monotonic_attention("waitk_fixed_pre_decision")
@fixed_pooling_monotonic_attention(WaitKAttention)
class WaitKAttentionFixedStride:
pass
@register_monotonic_attention("hard_aligned_fixed_pre_decision")
@fixed_pooling_monotonic_attention(MonotonicAttention)
class MonotonicAttentionFixedStride:
pass
@register_monotonic_attention("infinite_lookback_fixed_pre_decision")
@fixed_pooling_monotonic_attention(MonotonicInfiniteLookbackAttention)
class MonotonicInfiniteLookbackAttentionFixedStride:
pass
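# A minimal standalone sketch (not part of the original module) of the
# upsampling trick used in FixedStrideMonotonicAttention.insert_zeros above:
# a pooled attention map of length ceil(src_len / ratio) is expanded back by
# interleaving zeros, using a one-tap transposed convolution whose stride is
# the pre-decision ratio. All tensor sizes below are hypothetical and chosen
# only for illustration.
def _demo_insert_zeros(ratio: int = 4):
    bsz_num_heads, tgt_len, pooled_len = 2, 3, 5
    p_choose_pooled = torch.rand(bsz_num_heads, tgt_len, pooled_len)
    # Kernel [0, ..., 0, 1]: each pooled probability lands on the last encoder
    # step of its pooling window; every other step receives a zero.
    weight = F.pad(torch.ones(1, 1, 1), (ratio - 1, 0))
    upsampled = F.conv_transpose1d(
        p_choose_pooled.view(-1, pooled_len).unsqueeze(1),
        weight,
        stride=ratio,
    ).squeeze(1).view(bsz_num_heads, tgt_len, -1)
    assert upsampled.size(-1) == pooled_len * ratio
    return upsampled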
| 7,370 | 37.591623 | 91 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/modules/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import importlib
from fairseq import registry
(
build_monotonic_attention,
register_monotonic_attention,
MONOTONIC_ATTENTION_REGISTRY,
_,
) = registry.setup_registry("--simul-type")
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module(
"examples.simultaneous_translation.modules." + model_name
)
| 665 | 26.75 | 69 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/eval/agents/simul_t2t_enja.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from fairseq import checkpoint_utils, tasks
import sentencepiece as spm
import torch
try:
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from simuleval.agents import TextAgent
except ImportError:
print("Please install simuleval 'pip install simuleval'")
BOS_PREFIX = "\u2581"
class SimulTransTextAgentJA(TextAgent):
"""
Simultaneous Translation
Text agent for Japanese
"""
def __init__(self, args):
# Whether use gpu
self.gpu = getattr(args, "gpu", False)
# Max len
self.max_len = args.max_len
# Load Model
self.load_model_vocab(args)
# build word splitter
self.build_word_splitter(args)
self.eos = DEFAULT_EOS
def initialize_states(self, states):
states.incremental_states = dict()
states.incremental_states["online"] = dict()
def to_device(self, tensor):
if self.gpu:
return tensor.cuda()
else:
return tensor.cpu()
def load_model_vocab(self, args):
filename = args.model_path
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
task_args = state["cfg"]["task"]
task_args.data = args.data_bin
task = tasks.setup_task(task_args)
        # build the model (a single checkpoint, not an ensemble)
state["cfg"]["model"].load_pretrained_encoder_from = None
state["cfg"]["model"].load_pretrained_decoder_from = None
self.model = task.build_model(state["cfg"]["model"])
self.model.load_state_dict(state["model"], strict=True)
self.model.eval()
self.model.share_memory()
if self.gpu:
self.model.cuda()
# Set dictionary
self.dict = {}
self.dict["tgt"] = task.target_dictionary
self.dict["src"] = task.source_dictionary
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--model-path', type=str, required=True,
help='path to your pretrained model.')
parser.add_argument("--data-bin", type=str, required=True,
help="Path of data binary")
parser.add_argument("--max-len", type=int, default=100,
help="Max length of translation")
parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece",
help="Subword splitter type for target text.")
parser.add_argument("--tgt-splitter-path", type=str, default=None,
help="Subword splitter model path for target text.")
parser.add_argument("--src-splitter-type", type=str, default="SentencePiece",
help="Subword splitter type for source text.")
parser.add_argument("--src-splitter-path", type=str, default=None,
help="Subword splitter model path for source text.")
# fmt: on
return parser
def build_word_splitter(self, args):
self.spm = {}
for lang in ['src', 'tgt']:
if getattr(args, f'{lang}_splitter_type', None):
path = getattr(args, f'{lang}_splitter_path', None)
if path:
self.spm[lang] = spm.SentencePieceProcessor()
self.spm[lang].Load(path)
def segment_to_units(self, segment, states):
# Split a full word (segment) into subwords (units)
return self.spm['src'].EncodeAsPieces(segment)
def update_model_encoder(self, states):
if len(states.units.source) == 0:
return
src_indices = [
self.dict['src'].index(x)
for x in states.units.source.value
]
if states.finish_read():
            # Append the eos index once the source has been fully read
src_indices += [self.dict["tgt"].eos_index]
src_indices = self.to_device(
torch.LongTensor(src_indices).unsqueeze(0)
)
src_lengths = self.to_device(
torch.LongTensor([src_indices.size(1)])
)
states.encoder_states = self.model.encoder(src_indices, src_lengths)
torch.cuda.empty_cache()
def update_states_read(self, states):
# Happens after a read action.
self.update_model_encoder(states)
def units_to_segment(self, units, states):
        # Merge subwords (units) into a full word (segment).
        # For Japanese, tokens can be sent to the server directly
        # (except the BOS marker) when the following options are used:
        # --sacrebleu-tokenizer MeCab
        # --eval-latency-unit char
        # --no-space
token = units.value.pop()
if (
token == self.dict["tgt"].eos_word
or len(states.segments.target) > self.max_len
):
return DEFAULT_EOS
if BOS_PREFIX == token:
return None
if token[0] == BOS_PREFIX:
return token[1:]
else:
return token
def policy(self, states):
if not getattr(states, "encoder_states", None):
# No encoder states, read a token first
return READ_ACTION
# encode previous predicted target tokens
tgt_indices = self.to_device(
torch.LongTensor(
[self.model.decoder.dictionary.eos()]
+ [
self.dict['tgt'].index(x)
for x in states.units.target.value
if x is not None
]
).unsqueeze(0)
)
# Current steps
states.incremental_states["steps"] = {
"src": states.encoder_states["encoder_out"][0].size(0),
"tgt": 1 + len(states.units.target),
}
        # The "online"/"only" flag indicates that reading of the source is not finished yet
states.incremental_states["online"]["only"] = (
torch.BoolTensor([not states.finish_read()])
)
x, outputs = self.model.decoder.forward(
prev_output_tokens=tgt_indices,
encoder_out=states.encoder_states,
incremental_state=states.incremental_states,
)
states.decoder_out = x
torch.cuda.empty_cache()
if outputs.action == 0:
return READ_ACTION
else:
return WRITE_ACTION
def predict(self, states):
# Predict target token from decoder states
decoder_states = states.decoder_out
lprobs = self.model.get_normalized_probs(
[decoder_states[:, -1:]], log_probs=True
)
index = lprobs.argmax(dim=-1)[0, 0].item()
if index != self.dict['tgt'].eos_index:
token = self.dict['tgt'].string([index])
else:
token = self.dict['tgt'].eos_word
return token
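# A minimal standalone sketch (not part of the original agent) of the
# subword-merging rule implemented in units_to_segment above: SentencePiece
# marks word boundaries with "\u2581" (BOS_PREFIX), so a bare marker yields no
# output and a marker-prefixed piece is emitted with the marker stripped.
# EOS and max-length handling are omitted here on purpose.
def _demo_piece_to_segment(piece: str):
    if piece == BOS_PREFIX:
        return None
    return piece[1:] if piece.startswith(BOS_PREFIX) else piece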
| 7,099 | 30.277533 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module(
"examples.simultaneous_translation.models." + model_name
)
| 482 | 29.1875 | 68 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/models/transformer_monotonic_attention.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from examples.simultaneous_translation.modules.monotonic_transformer_layer import (
TransformerMonotonicDecoderLayer,
TransformerMonotonicEncoderLayer,
)
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder,
base_architecture,
transformer_iwslt_de_en,
transformer_vaswani_wmt_en_de_big,
tiny_architecture
)
from torch import Tensor
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
READ_ACTION = 0
WRITE_ACTION = 1
TransformerMonotonicDecoderOut = NamedTuple(
"TransformerMonotonicDecoderOut",
[
("action", int),
("p_choose", Optional[Tensor]),
("attn_list", Optional[List[Optional[Dict[str, Tensor]]]]),
("encoder_out", Optional[Dict[str, List[Tensor]]]),
("encoder_padding_mask", Optional[Tensor]),
],
)
@register_model("transformer_unidirectional")
class TransformerUnidirectionalModel(TransformerModel):
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerMonotonicEncoder(args, src_dict, embed_tokens)
@register_model("transformer_monotonic")
class TransformerModelSimulTrans(TransformerModel):
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerMonotonicEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerMonotonicDecoder(args, tgt_dict, embed_tokens)
class TransformerMonotonicEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
self.dictionary = dictionary
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerMonotonicEncoderLayer(args)
for i in range(args.encoder_layers)
]
)
class TransformerMonotonicDecoder(TransformerDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False)
self.dictionary = dictionary
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerMonotonicDecoderLayer(args)
for _ in range(args.decoder_layers)
]
)
self.policy_criterion = getattr(args, "policy_criterion", "any")
self.num_updates = None
def set_num_updates(self, num_updates):
self.num_updates = num_updates
def pre_attention(
self,
prev_output_tokens,
encoder_out_dict: Dict[str, List[Tensor]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
positions = (
self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_out = encoder_out_dict["encoder_out"][0]
if "encoder_padding_mask" in encoder_out_dict:
encoder_padding_mask = (
encoder_out_dict["encoder_padding_mask"][0]
if encoder_out_dict["encoder_padding_mask"]
and len(encoder_out_dict["encoder_padding_mask"]) > 0
else None
)
else:
encoder_padding_mask = None
return x, encoder_out, encoder_padding_mask
def post_attention(self, x):
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x
def clean_cache(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
end_id: Optional[int] = None,
):
"""
        Clean the cache in the monotonic layers.
        The cache is created when a forward pass of the decoder has run without
        producing a prediction, so the decoder self-attention keys/values have
        already been written to the incremental state.
        end_id is the exclusive upper bound on the layer indices to clean.
"""
if end_id is None:
end_id = len(self.layers)
for index, layer in enumerate(self.layers):
if index < end_id:
layer.prune_incremental_state(incremental_state)
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False, # unused
alignment_layer: Optional[int] = None, # unused
        alignment_heads: Optional[int] = None,  # unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# incremental_state = None
assert encoder_out is not None
(x, encoder_outs, encoder_padding_mask) = self.pre_attention(
prev_output_tokens, encoder_out, incremental_state
)
attn = None
inner_states = [x]
attn_list: List[Optional[Dict[str, Tensor]]] = []
p_choose = torch.tensor([1.0])
for i, layer in enumerate(self.layers):
x, attn, _ = layer(
x=x,
encoder_out=encoder_outs,
encoder_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
)
inner_states.append(x)
attn_list.append(attn)
if incremental_state is not None:
if_online = incremental_state["online"]["only"]
assert if_online is not None
if if_online.to(torch.bool):
# Online indicates that the encoder states are still changing
assert attn is not None
if self.policy_criterion == "any":
                        # If any head decides to read, then read
head_read = layer.encoder_attn._get_monotonic_buffer(incremental_state)["head_read"]
assert head_read is not None
if head_read.any():
                            # We need to prune the last self_attn saved_state
                            # if the model decides to read instead of write;
                            # otherwise there will be a duplicated saved_state
self.clean_cache(incremental_state, i + 1)
return x, TransformerMonotonicDecoderOut(
action=0,
p_choose=p_choose,
attn_list=None,
encoder_out=None,
encoder_padding_mask=None,
)
x = self.post_attention(x)
return x, TransformerMonotonicDecoderOut(
action=1,
p_choose=p_choose,
attn_list=attn_list,
encoder_out=encoder_out,
encoder_padding_mask=encoder_padding_mask,
)
@register_model_architecture("transformer_monotonic", "transformer_monotonic")
def base_monotonic_architecture(args):
base_architecture(args)
args.encoder_unidirectional = getattr(args, "encoder_unidirectional", False)
@register_model_architecture(
"transformer_monotonic", "transformer_monotonic_iwslt_de_en"
)
def transformer_monotonic_iwslt_de_en(args):
transformer_iwslt_de_en(args)
base_monotonic_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
"transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_de_big"
)
def transformer_monotonic_vaswani_wmt_en_de_big(args):
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture(
"transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_fr_big"
)
def transformer_monotonic_vaswani_wmt_en_fr_big(args):
    transformer_monotonic_vaswani_wmt_en_de_big(args)
@register_model_architecture(
"transformer_unidirectional", "transformer_unidirectional_iwslt_de_en"
)
def transformer_unidirectional_iwslt_de_en(args):
transformer_iwslt_de_en(args)
@register_model_architecture("transformer_monotonic", "transformer_monotonic_tiny")
def monotonic_tiny_architecture(args):
tiny_architecture(args)
base_monotonic_architecture(args)
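# A minimal sketch (not part of the original module) of how a simultaneous
# decoding loop interprets TransformerMonotonicDecoderOut. It mirrors the
# SimulEval text agent elsewhere in this repository; `decoder`, `tgt_indices`,
# `encoder_out`, `incremental_state` and `finish_read` are hypothetical
# placeholders supplied by the caller.
def _demo_policy_step(decoder, tgt_indices, encoder_out, incremental_state, finish_read):
    # Bookkeeping the monotonic attention modules expect: current source/target
    # steps and whether the source has been fully read ("online" decoding).
    incremental_state["steps"] = {
        "src": encoder_out["encoder_out"][0].size(0),
        "tgt": tgt_indices.size(1),
    }
    incremental_state["online"] = {"only": torch.BoolTensor([not finish_read])}
    _, outputs = decoder.forward(
        prev_output_tokens=tgt_indices,
        encoder_out=encoder_out,
        incremental_state=incremental_state,
    )
    return READ_ACTION if outputs.action == READ_ACTION else WRITE_ACTION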
| 10,185 | 32.617162 | 108 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/models/convtransformer_simul_trans.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq import checkpoint_utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text import (
ConvTransformerModel,
convtransformer_espnet,
ConvTransformerEncoder,
)
from fairseq.models.speech_to_text.modules.augmented_memory_attention import (
augmented_memory,
SequenceEncoder,
AugmentedMemoryConvTransformerEncoder,
)
from torch import nn, Tensor
from typing import Dict, List
from fairseq.models.speech_to_text.modules.emformer import NoSegAugmentedMemoryTransformerEncoderLayer
@register_model("convtransformer_simul_trans")
class SimulConvTransformerModel(ConvTransformerModel):
"""
Implementation of the paper:
SimulMT to SimulST: Adapting Simultaneous Text Translation to
End-to-End Simultaneous Speech Translation
https://www.aclweb.org/anthology/2020.aacl-main.58.pdf
"""
@staticmethod
def add_args(parser):
super(SimulConvTransformerModel, SimulConvTransformerModel).add_args(parser)
parser.add_argument(
"--train-monotonic-only",
action="store_true",
default=False,
help="Only train monotonic attention",
)
@classmethod
def build_decoder(cls, args, task, embed_tokens):
tgt_dict = task.tgt_dict
from examples.simultaneous_translation.models.transformer_monotonic_attention import (
TransformerMonotonicDecoder,
)
decoder = TransformerMonotonicDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder
@register_model_architecture(
"convtransformer_simul_trans", "convtransformer_simul_trans_espnet"
)
def convtransformer_simul_trans_espnet(args):
convtransformer_espnet(args)
@register_model("convtransformer_augmented_memory")
@augmented_memory
class AugmentedMemoryConvTransformerModel(SimulConvTransformerModel):
@classmethod
def build_encoder(cls, args):
encoder = SequenceEncoder(args, AugmentedMemoryConvTransformerEncoder(args))
if getattr(args, "load_pretrained_encoder_from", None) is not None:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@register_model_architecture(
"convtransformer_augmented_memory", "convtransformer_augmented_memory"
)
def augmented_memory_convtransformer_espnet(args):
convtransformer_espnet(args)
# ============================================================================ #
# Convtransformer
# with monotonic attention decoder
# with emformer encoder
# ============================================================================ #
class ConvTransformerEmformerEncoder(ConvTransformerEncoder):
def __init__(self, args):
super().__init__(args)
stride = self.conv_layer_stride(args)
trf_left_context = args.segment_left_context // stride
trf_right_context = args.segment_right_context // stride
context_config = [trf_left_context, trf_right_context]
self.transformer_layers = nn.ModuleList(
[
NoSegAugmentedMemoryTransformerEncoderLayer(
input_dim=args.encoder_embed_dim,
num_heads=args.encoder_attention_heads,
ffn_dim=args.encoder_ffn_embed_dim,
num_layers=args.encoder_layers,
dropout_in_attn=args.dropout,
dropout_on_attn=args.dropout,
dropout_on_fc1=args.dropout,
dropout_on_fc2=args.dropout,
activation_fn=args.activation_fn,
context_config=context_config,
segment_size=args.segment_length,
max_memory_size=args.max_memory_size,
scaled_init=True, # TODO: use constant for now.
tanh_on_mem=args.amtrf_tanh_on_mem,
)
]
)
self.conv_transformer_encoder = ConvTransformerEncoder(args)
def forward(self, src_tokens, src_lengths):
encoder_out: Dict[str, List[Tensor]] = self.conv_transformer_encoder(src_tokens, src_lengths.to(src_tokens.device))
output = encoder_out["encoder_out"][0]
encoder_padding_masks = encoder_out["encoder_padding_mask"]
return {
"encoder_out": [output],
            # This is because, in the original implementation,
            # the output did not treat the last segment as right context.
"encoder_padding_mask": [encoder_padding_masks[0][:, : output.size(0)]] if len(encoder_padding_masks) > 0
else [],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@staticmethod
def conv_layer_stride(args):
# TODO: make it configurable from the args
return 4
@register_model("convtransformer_emformer")
class ConvtransformerEmformer(SimulConvTransformerModel):
@staticmethod
def add_args(parser):
super(ConvtransformerEmformer, ConvtransformerEmformer).add_args(parser)
parser.add_argument(
"--segment-length",
type=int,
metavar="N",
help="length of each segment (not including left context / right context)",
)
parser.add_argument(
"--segment-left-context",
type=int,
help="length of left context in a segment",
)
parser.add_argument(
"--segment-right-context",
type=int,
help="length of right context in a segment",
)
parser.add_argument(
"--max-memory-size",
type=int,
default=-1,
help="Right context for the segment.",
)
parser.add_argument(
"--amtrf-tanh-on-mem",
default=False,
action="store_true",
help="whether to use tanh on memory vector",
)
@classmethod
def build_encoder(cls, args):
encoder = ConvTransformerEmformerEncoder(args)
if getattr(args, "load_pretrained_encoder_from", None):
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@register_model_architecture(
"convtransformer_emformer",
"convtransformer_emformer",
)
def convtransformer_emformer_base(args):
convtransformer_espnet(args)
| 7,162 | 33.941463 | 123 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/tests/test_text_models.py
|
import argparse
import unittest
from typing import Any, Dict
import torch
from examples.simultaneous_translation.models import (
transformer_monotonic_attention
)
from tests.test_roberta import FakeTask
DEFAULT_CONFIG = {
"attention_eps": 1e-6,
"mass_preservation": True,
"noise_type": "flat",
"noise_mean": 0.0,
"noise_var": 1.0,
"energy_bias_init": -2,
"energy_bias": True
}
PAD_INDEX = 1
def generate_config(overrides_kv):
new_dict = {key: value for key, value in DEFAULT_CONFIG.items()}
for key, value in overrides_kv.items():
new_dict[key] = value
return new_dict
def make_sample_with_padding(longer_src=False) -> Dict[str, Any]:
tokens_1 = torch.LongTensor(
[
[2, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 2],
[
2, 11, 12, 14, 15, 10, 11, 12, 13, 14, 15, 2,
PAD_INDEX, PAD_INDEX
],
]
)
tokens_2 = torch.LongTensor(
[
[2, 11, 12, 13, 14, 2, PAD_INDEX, PAD_INDEX],
[2, 11, 22, 33, 2, PAD_INDEX, PAD_INDEX, PAD_INDEX]
]
)
if longer_src:
src_tokens = tokens_1[:, 1:]
prev_output_tokens = tokens_2
else:
src_tokens = tokens_2[:, 1:8]
prev_output_tokens = tokens_1
src_lengths = src_tokens.ne(PAD_INDEX).sum(dim=1).long()
sample = {
"net_input": {
"src_tokens": src_tokens,
"prev_output_tokens": prev_output_tokens,
"src_lengths": src_lengths,
},
"target": prev_output_tokens[:, 1:],
}
return sample
def build_transformer_monotonic_attention(**extra_args: Any):
overrides = {
        # Use small, characteristic dimensions to keep the tests fast
"encoder_embed_dim": 12,
"encoder_ffn_embed_dim": 14,
"decoder_embed_dim": 12,
"decoder_ffn_embed_dim": 14,
# Disable dropout so we have comparable tests.
"dropout": 0,
"attention_dropout": 0,
"activation_dropout": 0,
"encoder_layerdrop": 0,
}
overrides.update(extra_args)
# Overrides the defaults from the parser
args = argparse.Namespace(**overrides)
transformer_monotonic_attention.monotonic_tiny_architecture(args)
torch.manual_seed(0)
task = FakeTask(args)
return (
transformer_monotonic_attention
.TransformerModelSimulTrans
.build_model(args, task)
)
def expected_alignment_formula(
p_choose,
mass_perservation=True,
padding_mask=None
):
# Online and Linear-Time Attention by Enforcing Monotonic Alignments
# https://arxiv.org/pdf/1704.00784.pdf
# Eq 18, 19
bsz, tgt_len, src_len = p_choose.size()
alpha = torch.zeros_like(p_choose)
if padding_mask is not None:
bsz_pad = padding_mask.size(0)
num_heads = int(bsz / bsz_pad)
padding_mask = (
padding_mask
.unsqueeze(1)
.expand([bsz_pad, num_heads, src_len])
.contiguous()
.view(-1, src_len)
)
p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0)
for bsz_i in range(bsz):
for i in range(tgt_len):
for j in range(src_len):
if i == 0:
if j == 0:
                        # First target step, first source position
alpha[bsz_i, i, j] = p_choose[bsz_i, i, j]
else:
                        # First target step, later source positions
alpha[bsz_i, i, j] = (
p_choose[bsz_i, i, j]
* torch.prod(
1 - p_choose[bsz_i, i, :j]
)
)
else:
alpha[bsz_i, i, j] = alpha[bsz_i, i - 1, j]
for k in range(j):
alpha[bsz_i, i, j] += (
alpha[bsz_i, i - 1, k]
* torch.prod(
1 - p_choose[bsz_i, i, k:j]
)
)
alpha[bsz_i, i, j] *= p_choose[bsz_i, i, j]
alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0)
if mass_perservation:
alpha = mass_perservation_formula(alpha, False, padding_mask)
return alpha
def mass_perservation_formula(alpha, left_padding=False, padding_mask=None):
if padding_mask is None or alpha.size(-1) == 1:
if alpha.size(-1) > 1:
alpha[:, :, -1] = 1 - alpha[:, :, :-1].sum(dim=-1)
return alpha
src_lens = (padding_mask.logical_not()).sum(dim=1).long()
bsz, tgt_len, src_len = alpha.size()
assert (
not left_padding
or (left_padding and (not padding_mask[:, 0].any()))
)
alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0)
for bsz_i in range(bsz):
if left_padding:
alpha[bsz_i, :, -1] = (
1 - alpha[bsz_i, :, :-1].sum(dim=-1)
)
else:
alpha[bsz_i, :, src_lens[bsz_i] - 1] = (
1 - alpha[bsz_i, :, :src_lens[bsz_i] - 1].sum(dim=-1)
)
return alpha
def expected_soft_attention_formula(
alpha,
soft_energy,
padding_mask=None,
chunksize=1e10,
):
# Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
# https://arxiv.org/pdf/1906.05218.pdf
# Eq 14
# Monotonic Chunkwise Attention
# https://arxiv.org/abs/1712.05382
# Eq 17
bsz, tgt_len, src_len = alpha.size()
beta = torch.zeros_like(alpha)
if padding_mask is not None:
bsz_pad = padding_mask.size(0)
num_heads = int(bsz / bsz_pad)
# Expanding for potential head dimension
padding_mask = (
padding_mask
.unsqueeze(1)
.expand([bsz_pad, num_heads, src_len])
.contiguous()
.view(-1, src_len)
)
soft_energy = soft_energy.masked_fill(padding_mask.unsqueeze(1), float('-inf'))
for bsz_i in range(bsz):
for i in range(tgt_len):
for j in range(src_len):
for k in range(j, min([src_len, j + chunksize])):
if not padding_mask[bsz_i, j]:
beta[bsz_i, i, j] += (
alpha[bsz_i, i, k] * torch.exp(soft_energy[bsz_i, i, j])
/ torch.sum(torch.exp(soft_energy[bsz_i, i, max([0, k - chunksize + 1]):k + 1]))
)
return beta
class MonotonicAttentionTestAbstractClass(object):
def test_forward(self):
sample = make_sample_with_padding()
out, _ = self.model.forward(**sample["net_input"])
loss = out.sum()
loss.backward()
def test_p_choose(self):
sample = make_sample_with_padding()
_, extra_out = self.model.forward(**sample["net_input"])
for item in extra_out.attn_list:
p_choose = item["p_choose"]
self.assertTrue(p_choose.le(1.0).all())
self.assertTrue(p_choose.ge(0.0).all())
def test_expected_alignment(self):
for longer_src in [True, False]:
sample = make_sample_with_padding(longer_src)
_, extra_out = self.model.forward(**sample["net_input"])
for item in extra_out.attn_list:
p_choose = item["p_choose"]
alpha_system = item["alpha"]
self.assertTrue(p_choose.size() == alpha_system.size())
bsz, num_head, tgt_len, src_len = alpha_system.size()
alpha_system = alpha_system.view(-1, tgt_len, src_len)
p_choose = p_choose.view(-1, tgt_len, src_len)
alpha_real = expected_alignment_formula(
p_choose,
self.model.decoder.layers[0].encoder_attn.mass_preservation,
sample["net_input"]["src_tokens"].eq(PAD_INDEX)
)
self.assertTrue(
torch.abs(alpha_system - alpha_real).le(5e-5).all(),
)
class HardMonotonicAttentionTestCase(
unittest.TestCase,
MonotonicAttentionTestAbstractClass
):
def setUp(self):
self.model = build_transformer_monotonic_attention(
**generate_config({"simul_type": "hard_aligned"})
)
class InfiniteLookbackTestCase(
unittest.TestCase,
MonotonicAttentionTestAbstractClass
):
def setUp(self):
self.model = build_transformer_monotonic_attention(
**generate_config(
{
"simul_type": "infinite_lookback"
}
)
)
self.model.train()
def test_fp16_for_long_input(self):
sample = {
"net_input": {
"src_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0),
"prev_output_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0),
"src_lengths": torch.LongTensor([1000]).cuda(),
},
"target": torch.LongTensor([2] + [7] * 1000).unsqueeze(0).cuda()
}
self.model.cuda().half()
_, extra_out = self.model.forward(**sample["net_input"])
for item in extra_out.attn_list:
for key in ["p_choose", "alpha", "beta", "soft_energy"]:
self.assertFalse(torch.isnan(item[key]).any())
def test_expected_attention(self):
for longer_src in [True, False]:
sample = make_sample_with_padding(longer_src)
_, extra_out = self.model.forward(**sample["net_input"])
for item in extra_out.attn_list:
p_choose = item["p_choose"]
alpha_system = item["alpha"]
beta_system = item["beta"]
soft_energy_system = item["soft_energy"]
self.assertTrue(beta_system.size() == alpha_system.size())
self.assertTrue(p_choose.size() == alpha_system.size())
bsz, num_head, tgt_len, src_len = alpha_system.size()
alpha_system = alpha_system.view(-1, tgt_len, src_len)
beta_system = beta_system.view(-1, tgt_len, src_len)
p_choose = p_choose.view(-1, tgt_len, src_len)
soft_energy_system = soft_energy_system.view(-1, tgt_len, src_len)
alpha_real = expected_alignment_formula(
p_choose,
self.model.decoder.layers[0].encoder_attn.mass_preservation,
sample["net_input"]["src_tokens"].eq(PAD_INDEX)
)
beta_real = expected_soft_attention_formula(
alpha_real,
soft_energy_system,
sample["net_input"]["src_tokens"].eq(PAD_INDEX),
chunksize=getattr(
self.model.decoder.layers[0].encoder_attn,
"chunk_size",
int(1e10)
)
)
self.assertTrue(
torch.abs(beta_system - beta_real).le(1e-5).all(),
)
class ChunkwiswTestCase(
InfiniteLookbackTestCase
):
def setUp(self):
self.model = build_transformer_monotonic_attention(
**generate_config(
{
"simul_type": "chunkwise",
"mocha_chunk_size": 3
}
)
)
class WaitkTestCase(InfiniteLookbackTestCase):
def setUp(self):
self.model = build_transformer_monotonic_attention(
**generate_config(
{
"simul_type": "waitk",
"waitk_lagging": 3,
}
)
)
def check_waitk(self, p_choose, lagging, padding_mask):
bsz, tgt_len, src_len = p_choose.size()
for bsz_i in range(bsz):
for i in range(tgt_len):
for j in range(src_len):
if not padding_mask[bsz_i, j]:
if j - i == lagging - 1:
self.assertTrue(p_choose[bsz_i, i, j] == 1)
else:
self.assertTrue(p_choose[bsz_i, i, j] == 0)
def test_waitk_p_choose(self):
for longer_src in [True, False]:
for k in [1, 3, 10, 20, 100]:
sample = make_sample_with_padding(longer_src)
model = build_transformer_monotonic_attention(
**generate_config(
{
"simul_type": "waitk",
"waitk_lagging": k,
}
)
)
model.train()
_, extra_out = model.forward(**sample["net_input"])
for item in extra_out.attn_list:
p_choose = item["p_choose"]
bsz, num_heads, tgt_len, src_len = p_choose.size()
padding_mask = sample["net_input"]["src_tokens"].eq(PAD_INDEX)
padding_mask = (
padding_mask
.unsqueeze(1)
.expand([bsz, num_heads, src_len])
.contiguous()
.view(-1, src_len)
)
p_choose = p_choose.view(bsz * num_heads, tgt_len, src_len)
self.check_waitk(p_choose, k, padding_mask)
| 13,524 | 32.14951 | 108 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/tests/test_alignment_train.py
|
import unittest
import numpy as np
import torch
import hypothesis.strategies as st
from hypothesis import assume, given, settings
from torch.testing._internal.common_utils import TestCase
from examples.simultaneous_translation.utils.functions import exclusive_cumprod
TEST_CUDA = torch.cuda.is_available()
class AlignmentTrainTest(TestCase):
def _test_custom_alignment_train_ref(self, p_choose, eps):
cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=eps)
cumprod_1mp_clamp = torch.clamp(cumprod_1mp, eps, 1.0)
bsz = p_choose.size(0)
tgt_len = p_choose.size(1)
src_len = p_choose.size(2)
alpha_0 = p_choose.new_zeros([bsz, 1, src_len])
alpha_0[:, :, 0] = 1.0
previous_alpha = [alpha_0]
for i in range(tgt_len):
# p_choose: bsz , tgt_len, src_len
# cumprod_1mp_clamp : bsz, tgt_len, src_len
# previous_alpha[i]: bsz, 1, src_len
# alpha_i: bsz, src_len
alpha_i = (
p_choose[:, i]
* cumprod_1mp[:, i]
* torch.cumsum(
previous_alpha[i][:, 0] / cumprod_1mp_clamp[:, i], dim=1
)
).clamp(0, 1.0)
previous_alpha.append(alpha_i.unsqueeze(1))
# alpha: bsz * num_heads, tgt_len, src_len
alpha = torch.cat(previous_alpha[1:], dim=1)
return alpha
def _test_custom_alignment_train_impl(self, p_choose, alpha, eps):
if p_choose.is_cuda:
from alignment_train_cuda_binding import alignment_train_cuda # @manual=//deeplearning/projects/fairseq-py:alignment_train_cuda_binding
alignment_train_cuda(p_choose, alpha, eps)
else:
from alignment_train_cpu_binding import alignment_train_cpu # @manual=//deeplearning/projects/fairseq-py:alignment_train_cpu_binding
alignment_train_cpu(p_choose, alpha, eps)
@settings(deadline=None)
@given(
bsz=st.integers(1, 100),
tgt_len=st.integers(1, 100),
src_len=st.integers(1, 550),
device=st.sampled_from(["cpu", "cuda"]),
)
def test_alignment_train(self, bsz, tgt_len, src_len, device):
eps = 1e-6
assume(device == "cpu" or TEST_CUDA)
p_choose = torch.rand(bsz, tgt_len, src_len, device=device)
# run the alignment with the custom operator
alpha_act = p_choose.new_zeros([bsz, tgt_len, src_len])
self._test_custom_alignment_train_impl(p_choose, alpha_act, eps)
        # run the alignment with the reference implementation
alpha_ref = self._test_custom_alignment_train_ref(p_choose, eps)
# verify the results
alpha_act = alpha_act.cpu().detach().numpy()
alpha_ref = alpha_ref.cpu().detach().numpy()
np.testing.assert_allclose(
alpha_act,
alpha_ref,
atol=1e-3,
rtol=1e-3,
)
if __name__ == "__main__":
unittest.main()
| 2,989 | 32.595506 | 148 |
py
|
sign-topic
|
sign-topic-main/examples/simultaneous_translation/utils/monotonic_attention.py
|
from typing import Optional
import torch
from torch import Tensor
from examples.simultaneous_translation.utils.functions import (
exclusive_cumprod,
prob_check,
moving_sum,
)
def expected_alignment_from_p_choose(
p_choose: Tensor,
padding_mask: Optional[Tensor] = None,
eps: float = 1e-6
):
"""
    Calculating the expected alignment from the stepwise probability
Reference:
Online and Linear-Time Attention by Enforcing Monotonic Alignments
https://arxiv.org/pdf/1704.00784.pdf
    q_{i,j} = (1 - p_{i,j-1}) * q_{i,j-1} + a_{i-1,j}
    a_{i,j} = p_{i,j} * q_{i,j}
    Parallel solution:
    a_i = p_i * cumprod(1 - p_i) * cumsum(a_{i-1} / cumprod(1 - p_i))
============================================================
Expected input size
p_choose: bsz, tgt_len, src_len
"""
prob_check(p_choose)
# p_choose: bsz, tgt_len, src_len
bsz, tgt_len, src_len = p_choose.size()
dtype = p_choose.dtype
p_choose = p_choose.float()
if padding_mask is not None:
p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0.0)
if p_choose.is_cuda:
p_choose = p_choose.contiguous()
from alignment_train_cuda_binding import alignment_train_cuda as alignment_train
else:
from alignment_train_cpu_binding import alignment_train_cpu as alignment_train
alpha = p_choose.new_zeros([bsz, tgt_len, src_len])
alignment_train(p_choose, alpha, eps)
    # Cast back to the original dtype (fp32 was used above to avoid fp16 overflow)
alpha = alpha.type(dtype)
prob_check(alpha)
return alpha
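# A sequential pure-PyTorch reference (not part of the original module) for the
# recurrence in the docstring above. It mirrors the reference used in the unit
# tests of the compiled alignment_train kernels and is meant only as a readable
# sketch, not a drop-in replacement for the fused implementation.
def _expected_alignment_reference(p_choose: Tensor, eps: float = 1e-6) -> Tensor:
    bsz, tgt_len, src_len = p_choose.size()
    cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=eps)
    cumprod_1mp_clamp = torch.clamp(cumprod_1mp, eps, 1.0)
    alpha_0 = p_choose.new_zeros([bsz, 1, src_len])
    alpha_0[:, :, 0] = 1.0
    previous_alpha = [alpha_0]
    for i in range(tgt_len):
        # a_i = p_i * cumprod(1 - p_i) * cumsum(a_{i-1} / cumprod(1 - p_i))
        alpha_i = (
            p_choose[:, i]
            * cumprod_1mp[:, i]
            * torch.cumsum(previous_alpha[i][:, 0] / cumprod_1mp_clamp[:, i], dim=1)
        ).clamp(0, 1.0)
        previous_alpha.append(alpha_i.unsqueeze(1))
    return torch.cat(previous_alpha[1:], dim=1)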
def expected_soft_attention(
alpha: Tensor,
soft_energy: Tensor,
padding_mask: Optional[Tensor] = None,
chunk_size: Optional[int] = None,
eps: float = 1e-10
):
"""
Function to compute expected soft attention for
monotonic infinite lookback attention from
expected alignment and soft energy.
Reference:
Monotonic Chunkwise Attention
https://arxiv.org/abs/1712.05382
Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
https://arxiv.org/abs/1906.05218
alpha: bsz, tgt_len, src_len
soft_energy: bsz, tgt_len, src_len
padding_mask: bsz, src_len
    chunk_size: int or None (None means infinite lookback)
"""
if padding_mask is not None:
alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0)
soft_energy = soft_energy.masked_fill(
padding_mask.unsqueeze(1), -float("inf")
)
prob_check(alpha)
dtype = alpha.dtype
alpha = alpha.float()
soft_energy = soft_energy.float()
soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0]
exp_soft_energy = torch.exp(soft_energy) + eps
if chunk_size is not None:
# Chunkwise
beta = (
exp_soft_energy
* moving_sum(
alpha / (eps + moving_sum(exp_soft_energy, chunk_size, 1)),
1, chunk_size
)
)
else:
# Infinite lookback
# Notice that infinite lookback is a special case of chunkwise
# where chunksize = inf
inner_items = alpha / (eps + torch.cumsum(exp_soft_energy, dim=2))
beta = (
exp_soft_energy
* torch.cumsum(inner_items.flip(dims=[2]), dim=2)
.flip(dims=[2])
)
if padding_mask is not None:
beta = beta.masked_fill(
padding_mask.unsqueeze(1).to(torch.bool), 0.0)
    # Cast back to the original dtype (fp32 was used above to avoid fp16 overflow)
beta = beta.type(dtype)
beta = beta.clamp(0, 1)
prob_check(beta)
return beta
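# A small sketch (not part of the original module) checking the remark above
# that infinite lookback is the special case of chunkwise attention with an
# unbounded chunk: with chunk_size >= src_len the two branches of
# expected_soft_attention should agree up to numerical tolerance. The shapes
# below are hypothetical.
def _demo_chunkwise_equals_infinite_lookback():
    torch.manual_seed(0)
    bsz, tgt_len, src_len = 2, 3, 5
    alpha = torch.softmax(torch.rand(bsz, tgt_len, src_len), dim=-1)
    soft_energy = torch.rand(bsz, tgt_len, src_len)
    beta_inf = expected_soft_attention(alpha, soft_energy, chunk_size=None)
    beta_chunk = expected_soft_attention(alpha, soft_energy, chunk_size=src_len)
    return torch.allclose(beta_inf, beta_chunk, atol=1e-4)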
def mass_preservation(
alpha: Tensor,
padding_mask: Optional[Tensor] = None,
left_padding: bool = False
):
"""
    Function to compute the mass preservation for alpha.
This means that the residual weights of alpha will be assigned
to the last token.
Reference:
Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
https://arxiv.org/abs/1906.05218
alpha: bsz, tgt_len, src_len
padding_mask: bsz, src_len
left_padding: bool
"""
prob_check(alpha)
if padding_mask is not None:
if not left_padding:
assert not padding_mask[:, 0].any(), (
"Find padding on the beginning of the sequence."
)
alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0.0)
if left_padding or padding_mask is None:
residuals = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0, 1)
alpha[:, :, -1] = residuals
else:
# right padding
_, tgt_len, src_len = alpha.size()
residuals = 1 - alpha.sum(dim=-1, keepdim=True).clamp(0, 1)
src_lens = src_len - padding_mask.sum(dim=1, keepdim=True)
src_lens = src_lens.expand(-1, tgt_len).contiguous()
# add back the last value
residuals += alpha.gather(2, src_lens.unsqueeze(2) - 1)
alpha = alpha.scatter(2, src_lens.unsqueeze(2) - 1, residuals)
prob_check(alpha)
return alpha
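# A tiny numeric sketch (not part of the original module) of what
# mass_preservation does with right padding: any probability mass not yet
# assigned is pushed onto the last non-padded source position, so every row of
# alpha sums to exactly 1. The numbers are hypothetical.
def _demo_mass_preservation():
    alpha = torch.tensor([[[0.2, 0.3, 0.1, 0.0]]])              # bsz=1, tgt_len=1, src_len=4
    padding_mask = torch.tensor([[False, False, False, True]])  # last source position is pad
    out = mass_preservation(alpha, padding_mask)
    # The last real position (index 2) now holds 1 - 0.2 - 0.3 = 0.5.
    assert torch.allclose(out.sum(dim=-1), torch.ones(1, 1))
    return out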
| 4,975 | 26.491713 | 88 |
py
|