sign-topic | sign-topic-main/examples/simultaneous_translation/utils/p_choose_strategy.py
from typing import Optional, Dict
from torch import Tensor
import torch
def waitk_p_choose(
tgt_len: int,
src_len: int,
bsz: int,
waitk_lagging: int,
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None
):
max_src_len = src_len
if incremental_state is not None:
# Retrieve target length from incremental states
# For inference the length of query is always 1
max_tgt_len = incremental_state["steps"]["tgt"]
assert max_tgt_len is not None
max_tgt_len = int(max_tgt_len)
else:
max_tgt_len = tgt_len
if max_src_len < waitk_lagging:
if incremental_state is not None:
max_tgt_len = 1
return torch.zeros(
bsz, max_tgt_len, max_src_len
)
    # Assuming the p_choose looks like this for wait k=3
    # src_len = 6, max_tgt_len = 4
    # [0, 0, 1, 0, 0, 0]
    # [0, 0, 0, 1, 0, 0]
    # [0, 0, 0, 0, 1, 0]
    # [0, 0, 0, 0, 0, 1]
    # (indices that would run past the last source step are clamped below)
    # linearize the p_choose matrix:
    # [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0...]
    # The indices of the linearized matrix that equal 1 are
# 2 + 6 * 0
# 3 + 6 * 1
# ...
# n + src_len * n + k - 1 = n * (src_len + 1) + k - 1
# n from 0 to max_tgt_len - 1
#
# First, generate the indices (activate_indices_offset: bsz, max_tgt_len)
# Second, scatter a zeros tensor (bsz, max_tgt_len * src_len)
# with activate_indices_offset
# Third, resize the tensor to (bsz, max_tgt_len, src_len)
activate_indices_offset = (
(
torch.arange(max_tgt_len) * (max_src_len + 1)
+ waitk_lagging - 1
)
.unsqueeze(0)
.expand(bsz, max_tgt_len)
.long()
)
if key_padding_mask is not None:
if key_padding_mask[:, 0].any():
# Left padding
activate_indices_offset += (
key_padding_mask.sum(dim=1, keepdim=True)
)
# Need to clamp the indices that are too large
activate_indices_offset = (
activate_indices_offset
.clamp(
0,
min(
[
max_tgt_len,
max_src_len - waitk_lagging + 1
]
) * max_src_len - 1
)
)
p_choose = torch.zeros(bsz, max_tgt_len * max_src_len)
p_choose = p_choose.scatter(
1,
activate_indices_offset,
1.0
).view(bsz, max_tgt_len, max_src_len)
if key_padding_mask is not None:
p_choose = p_choose.to(key_padding_mask)
p_choose = p_choose.masked_fill(key_padding_mask.unsqueeze(1), 0)
if incremental_state is not None:
p_choose = p_choose[:, -1:]
return p_choose.float()
def learnable_p_choose(
energy,
noise_mean: float = 0.0,
noise_var: float = 0.0,
training: bool = True
):
"""
    Calculate the step-wise probability of reading vs. writing:
    1 means read, 0 means write
energy: bsz, tgt_len, src_len
"""
noise = 0
if training:
        # add noise here to encourage discreteness
noise = (
torch.normal(noise_mean, noise_var, energy.size())
.type_as(energy)
.to(energy.device)
)
p_choose = torch.sigmoid(energy + noise)
# p_choose: bsz * self.num_heads, tgt_len, src_len
return p_choose
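# A minimal usage sketch (not part of the original file), exercising both
# strategies above on toy sizes.
if __name__ == "__main__":
    # wait-k: each target step reads exactly one source step, offset by k - 1
    p = waitk_p_choose(tgt_len=4, src_len=6, bsz=1, waitk_lagging=3)
    print(p[0])  # ones on the diagonal starting at source column 2
    # learnable: sigmoid(energy + noise); noise is only added in training mode,
    # so zero energies yield a uniform 0.5 read probability at inference time
    energy = torch.zeros(1, 4, 6)
    print(learnable_p_choose(energy, noise_mean=0.0, noise_var=1.0, training=False))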
sign-topic | sign-topic-main/examples/simultaneous_translation/utils/functions.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def prob_check(tensor, eps=1e-10):
assert not torch.isnan(tensor).any(), (
"Nan in a probability tensor."
)
# Add the eps here to prevent errors introduced by precision
assert tensor.le(1.0 + eps).all() and tensor.ge(0.0 - eps).all(), (
"Incorrect values in a probability tensor"
", 0.0 <= tensor <= 1.0"
)
def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):
"""
Implementing exclusive cumprod.
There is cumprod in pytorch, however there is no exclusive mode.
    cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i]
exclusive means
cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
"""
tensor_size = list(tensor.size())
tensor_size[dim] = 1
return_tensor = safe_cumprod(
torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim),
dim=dim,
eps=eps,
)
if dim == 0:
return return_tensor[:-1]
elif dim == 1:
return return_tensor[:, :-1]
elif dim == 2:
return return_tensor[:, :, :-1]
else:
raise RuntimeError(
"Cumprod on dimension 3 and more is not implemented"
)
def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
"""
An implementation of cumprod to prevent precision issue.
cumprod(x)
= [x1, x1x2, x1x2x3, ....]
= [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]
= exp(cumsum(log(x)))
"""
if (tensor + eps < 0).any().item():
        raise RuntimeError(
            "Safe cumprod can only take non-negative tensors as input. "
            "Consider using torch.cumprod if you need products of negative values."
        )
log_tensor = torch.log(tensor + eps)
cumsum_log_tensor = torch.cumsum(log_tensor, dim)
exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)
return exp_cumsum_log_tensor
def moving_sum(x, start_idx: int, end_idx: int):
"""
From MONOTONIC CHUNKWISE ATTENTION
https://arxiv.org/pdf/1712.05382.pdf
Equation (18)
x = [x_1, x_2, ..., x_N]
MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m
for n in {1, 2, 3, ..., N}
    x : batch_size, tgt_len, src_len
        (the implementation expects a 3-D tensor; the 2-D example below
        illustrates the moving sum along the src_len axis only)
    start_idx : start idx
    end_idx : end idx
Example
src_len = 5
batch_size = 3
x =
[[ 0, 5, 10],
[ 1, 6, 11],
[ 2, 7, 12],
[ 3, 8, 13],
[ 4, 9, 14]]
MovingSum(x, 3, 1) =
[[ 0, 5, 10],
[ 1, 11, 21],
[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39]]
MovingSum(x, 1, 3) =
[[ 3, 18, 33],
[ 6, 21, 36],
[ 9, 24, 39],
[ 7, 17, 27],
[ 4, 9, 14]]
"""
# TODO: Make dimension configurable
assert start_idx > 0 and end_idx > 0
batch_size, tgt_len, src_len = x.size()
x = x.view(-1, src_len).unsqueeze(1)
# batch_size, 1, src_len
moving_sum_weight = torch.ones([1, 1, end_idx + start_idx - 1]).type_as(x)
moving_sum = torch.nn.functional.conv1d(
x, moving_sum_weight, padding=start_idx + end_idx - 1
).squeeze(1)
moving_sum = moving_sum[:, end_idx:-start_idx]
assert src_len == moving_sum.size(1)
assert batch_size * tgt_len == moving_sum.size(0)
moving_sum = moving_sum.view(batch_size, tgt_len, src_len)
return moving_sum
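# A minimal usage sketch (not part of the original file); the expected values
# follow the docstrings above, up to the eps term inside safe_cumprod.
if __name__ == "__main__":
    x = torch.tensor([[0.5, 0.5, 0.5]])
    print(safe_cumprod(x, dim=1))       # ~[0.5000, 0.2500, 0.1250]
    print(exclusive_cumprod(x, dim=1))  # ~[1.0000, 0.5000, 0.2500]
    y = torch.arange(5, dtype=torch.float).view(1, 1, 5)
    print(moving_sum(y, start_idx=3, end_idx=1))  # [[[0., 1., 3., 6., 9.]]]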
sign-topic | sign-topic-main/examples/simultaneous_translation/utils/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in this directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("examples.simultaneous_translation.utils." + module)
sign-topic | sign-topic-main/examples/SL_topic_detection/analysis_outputs.py
# Script for analyzing outputs produced by the models.
import math
from typing import Tuple
from typing import Union
from typing import List
from typing import Dict
from copy import deepcopy
import argparse
import ast
import pandas as pd
from sympy import ShapeError
import numpy as np
from sklearn import manifold
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import PrecisionRecallDisplay
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import classification_report
from sklearn.calibration import calibration_curve, CalibrationDisplay
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import cm
import torch
import torchvision
import torchvision.transforms.functional as F
from PIL import ImageFont, ImageDraw, ImageOps
# from transformer_contributions_nmt.wrappers.transformer_wrapper import FairseqTransformerHub
tab10 = [
"#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd",
"#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"
]
def load_data_dict(file_path: str) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    data = torch.load(file_path, map_location=torch.device('cpu'))
    if isinstance(data, dict):
        return data  # keys: 'embeddings', 'targets', 'preds', 'att_time', 'att_inputs'
    raise TypeError(f'Expected data container to be of type `dict` but got `{type(data)}` instead.')
def plot_precision_recall(
targets_binary: List[int],
preds_binary: List[int],
model: str,
data_type: str,
split: str,
average: str,
) -> None:
precision = dict()
recall = dict()
average_precision = dict()
precision[average], recall[average], _ = precision_recall_curve(
targets_binary.ravel(), preds_binary.ravel()
)
average_precision[average] = average_precision_score(targets_binary, preds_binary, average=average)
display = PrecisionRecallDisplay(
recall=recall[average],
precision=precision[average],
average_precision=average_precision[average],
)
display.plot()
_ = display.ax_.set_title(f"{average}-average; {model} - {data_type} - {split}")
plt.savefig(f'./outputs/{average}-average_precision_recall_{model}_{data_type}_{split}.png')
plt.close()
for i in range(10):
precision[i], recall[i], _ = precision_recall_curve(targets_binary[:, i], preds_binary[:, i])
average_precision[i] = average_precision_score(targets_binary[:, i], preds_binary[:, i])
_, ax = plt.subplots(figsize=(8, 8))
for i, color in zip(range(10), tab10):
display = PrecisionRecallDisplay(
recall=recall[i],
precision=precision[i],
average_precision=average_precision[i],
)
display.plot(ax=ax, name=f"class {i}", color=color)
display = PrecisionRecallDisplay(
recall=recall[average],
precision=precision[average],
average_precision=average_precision[average],
)
display.plot(ax=ax, name=f"{average}-average precision-recall", color="gold")
handles, labels = display.ax_.get_legend_handles_labels()
# set the legend and the axes
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.legend(handles=handles, labels=labels, loc="best")
ax.set_title(f'{model} - {data_type} - {split}')
plt.savefig(f'./outputs/{average}-average_precision_recall_multiclass_{model}_{data_type}_{split}.png')
plt.close()
def calibration_plots(
targets: Union[List[int], torch.Tensor],
logits: torch.Tensor,
model: str,
data_type: str,
split: str,
) -> None:
print(targets)
print(logits)
logits = logits.squeeze()
# fig = plt.figure(figsize=(12, 19))
# # gs = GridSpec(10, 2)
# # ax_calibration_curve = fig.add_subplot(gs[:2, :2])
# # calibration_displays = {}
# for categ in range(10):
# targets_binary = [1 if categ == int(tgt) else 0 for tgt in targets]
# logits_categ = logits[:, categ].squeeze().tolist()
# prob_true, prob_pred = calibration_curve(targets_binary, logits_categ, n_bins=10, normalize=True,)
# display = CalibrationDisplay(
# prob_true,
# prob_pred,
# logits_categ,
# )
# display.plot(ax=ax_calibration_curve, name=f"class {categ}", color=tab10[categ],)
# calibration_displays[categ] = display
gs = GridSpec(2, 1)
fig = plt.figure(figsize=(8, 8))
ax_calibration_curve = fig.add_subplot(gs[0, 0])
    logits_list = logits.tolist()
    targets_list = []
    for tgt in targets.tolist():
        targets_list.append([1 if categ == int(tgt) else 0 for categ in range(1, 11)])
    # micro-average calibration: flatten the one-hot targets and the
    # per-class probabilities into parallel binary/probability vectors
    targets_flat = np.asarray(targets_list).ravel()
    logits_flat = np.asarray(logits_list).ravel()
    print(targets_flat)
    print(logits_flat)
    prob_true, prob_pred = calibration_curve(targets_flat, logits_flat, n_bins=10, normalize=True)
    display = CalibrationDisplay(
        prob_true,
        prob_pred,
        logits_flat,
    )
    display.plot(
        ax=ax_calibration_curve,
        name=f"{model}",
        color=tab10[0],
    )
    plt.grid()
    plt.title(f'{model} - {data_type} - {split}')
    ax = fig.add_subplot(gs[1, 0])
    ax.hist(
        display.y_prob,
        range=(0, 1),
        bins=10,
        label=model,
        color=tab10[0],
    )
ax.set(xlabel="Mean predicted probability", ylabel="Count")
# # Add histogram
# grid_positions = [(i, j) for i in range(2, 10) for j in range(0, 2)]
# for categ in range(10):
# row, col = grid_positions[categ]
# ax = fig.add_subplot(gs[row, col])
# ax.hist(
# calibration_displays[categ].y_prob,
# range=(0, 1),
# bins=10,
# label=categ+1,
# color=tab10[categ],
# )
# ax.set(title=categ, xlabel="Mean predicted probability", ylabel="Count")
plt.tight_layout()
plt.savefig(f'./outputs/calibration_multiclass_{model}_{data_type}_{split}.png')
plt.close()
def plot_confusion_matrix(
targets: Union[List[int], torch.Tensor],
preds: Union[List[int], torch.Tensor],
model: str,
data_type: str,
split: str,
) -> None:
disp = ConfusionMatrixDisplay.from_predictions(targets, preds, cmap=plt.cm.Blues, colorbar=False)
disp.figure_.suptitle(f'{model} - {data_type} - {split}')
plt.savefig(f'./outputs/confusion_matrix_{model}_{data_type}_{split}.png')
plt.close()
def metrics_to_csv(
targets: Union[List[int], torch.Tensor],
preds: Union[List[int], torch.Tensor],
model: str,
data_type: str,
split: str,
) -> None:
report = classification_report(
targets,
preds,
# labels=[i for i in range(1, 11)],
# target_names=[i for i in range(1, 11)],
digits=4,
output_dict=True,
zero_division='warn',
)
report = pd.DataFrame.from_dict(report, orient='columns').transpose()
report.to_csv(f'./outputs/metrics_report_{model}_{data_type}_{split}.csv')
support = report.pop('support')
    report, weighted_avg = report.drop(report.tail(1).index), report.tail(1)
    report, macro_avg = report.drop(report.tail(1).index), report.tail(1)
    report, accuracy = report.drop(report.tail(1).index), report.tail(1)
report = report.append(weighted_avg)
report = report.append(macro_avg)
accuracy = accuracy.iloc[0,0]
ax = report.plot.bar(
rot=0,
width=0.7,
edgecolor='white',
linewidth=1.5,
color=["#ff7f0e", "#bcbd22", "#8c564b"],
figsize=(11, 5),
)
ax.axes.set_xlim(-0.5,11.5)
leg1 = ax.legend(
loc='upper center',
bbox_to_anchor=(0.5, 1.08),
ncol=3,
fancybox=True,
shadow=True
)
    leg2 = ax.legend(
        handles=[
            Line2D(
                [0], [0], marker='o', color='w',
                label="accuracy = {:.2f} %".format(accuracy * 100),
                markerfacecolor='g', markersize=0)
        ],
        loc='upper center',
        bbox_to_anchor=(0.85, 1.065),
        borderaxespad=0,
        fontsize='x-large',
        frameon=False,
    )
ax.add_artist(leg1)
plt.xticks([i for i in range(12)], [i for i in range(1, 11)] + ['w_avg', 'macro_avg'])
plt.savefig(f'./outputs/metrics_barchart_{model}_{data_type}_{split}.png')
plt.close()
ax = support.iloc[0:10].plot.bar(rot=0, width=0.7, edgecolor='white', linewidth=1, color=tab10)
ax.set_title(f"Samples per class in {split} set")
plt.xticks([i for i in range(10)], [i for i in range(1, 11)])
plt.savefig(f'./outputs/metrics_support_{model}_{data_type}_{split}.png')
plt.close()
def analysis_of_errors(
targets: Union[List[int], torch.Tensor],
preds: Union[List[int], torch.Tensor],
logits: Union[List[float], torch.Tensor],
labels: List[str],
model: str,
data_type: str,
split: str,
) -> None:
from sklearn.metrics import precision_recall_fscore_support as score
# targets = [1,2,3,4,5,6,7,8,9,0,9,8,7,6,5,1,2,1,1,4,1]
# preds = [1,2,3,4,5,6,7,8,9,0,9,8,7,6,5,1,2,1,1,4,5]
# logits = []
# labels = [f'class_{i}' for i in range(10)]
# for i in range(21):
# array = np.random.normal(0.5, 10, 10)
# array /= np.sum(array)
# logits.append(array.tolist())
# logits = torch.tensor(logits)
from sklearn.preprocessing import label_binarize
# Use label_binarize to fit into a multilabel setting
targets_binary = label_binarize(targets, classes=[i for i in range(10)])
preds_binary = label_binarize(preds, classes=[i for i in range(10)])
# TODO: make sure that targets and preds are binarized the same way
for average in ['micro', 'macro']:
plot_precision_recall(
targets_binary=targets_binary,
preds_binary=preds_binary,
model=model,
data_type=data_type,
split=split,
average=average,
)
plot_confusion_matrix(
targets=targets,
preds=preds,
model=model,
data_type=data_type,
split=split,
)
metrics_to_csv(
targets=targets,
preds=preds,
model=model,
data_type=data_type,
split=split,
)
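# A hypothetical smoke test (not part of the original script): drives the full
# error-analysis pipeline on random predictions for the 10 How2Sign topics,
# assuming an ./outputs/ directory exists for the generated figures.
def _smoke_test_analysis_of_errors():
    rng = np.random.default_rng(0)
    targets = rng.integers(0, 10, size=200).tolist()
    preds = rng.integers(0, 10, size=200).tolist()
    logits = torch.softmax(torch.randn(200, 10), dim=1)
    analysis_of_errors(
        targets=targets,
        preds=preds,
        logits=logits,
        labels=[f'class_{i}' for i in range(10)],
        model='dummy',
        data_type='toy',
        split='val',
    )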
def plot_att_time(
att_time: torch.Tensor,
video_id: str,
model: str,
data_type: str,
) -> None:
att_time = att_time[0] # TODO: remove this
print(att_time)
if att_time.shape[0] == 1 and len(att_time.shape) == 2:
att_time -= att_time.min(1, keepdim=True)[0]
att_time /= att_time.max(1, keepdim=True)[0]
elif len(att_time.shape) == 1:
att_time -= att_time.min(0, keepdim=True)[0]
att_time /= att_time.max(0, keepdim=True)[0]
else:
raise ShapeError('Please pass in a tensor corresponding to just one sample')
if model == 'transformer':
raise TypeError('Temporal attention visualization is not implemented for Transformer, only for PerceiverIO and LSTM.')
att_time = att_time.tolist()
print(f'len(att_time) = {len(att_time)}')
text_timestamps = pd.read_csv(
(
'/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/'
'train_csv_frames_redone_2.csv'
),
sep = '\t'
)
text_timestamps = text_timestamps[text_timestamps['VIDEO_ID'] == video_id]
text_timestamps = text_timestamps.sort_values(by=['START_FRAME'])
word_timestamp_list = []
if data_type == 'text': # TODO: retrieve tokenized text to visualize it
pass
else:
for index, row in text_timestamps.iterrows():
print(f'index = {index}')
text = row['SENTENCE']
words = text.split(' ')
l = len(words)
start_frame = row['START_FRAME'] - 8 if data_type == 'i3d' else row['START_FRAME']
end_frame = row['END_FRAME'] - 8 if data_type == 'i3d' else row['END_FRAME']
delta_t = end_frame - start_frame
delta_w = delta_t / l
word_frame = [(math.floor(i * delta_w), word) for i, word in enumerate(words)]
expand = max(1, math.floor((delta_t / 100) ** 1.2))
print(f'expand = {expand}')
x = [i for i in range(0, delta_t, 1)]
y = [1 for _ in range(0, delta_t, 1)]
print(f'(start_frame, end_frame) = {(start_frame, end_frame)}')
c = att_time[start_frame:end_frame]
# viz. weights
dpi = 120
fig, ax = plt.subplots(figsize=(1000 / dpi * expand, 1000 / dpi * expand), dpi=dpi)
ax.axis('off')
ax.scatter(x, y, c=c, alpha=0.8, marker='s', s=1)
for i, word in word_frame:
print(f'i = {i}')
ax.annotate(word, (x[i], y[i]))
plt.set_cmap('YlOrBr')
plt.savefig(f'./outputs/attention_{model}_{data_type}_{start_frame}_{end_frame}.png')
plt.close()
pass
def viz_att(
att_time: torch.Tensor,
video_path: str,
csv_path:str,
model: str,
data_type: str,
) -> None:
"""
To visualize attentions on video:
* LSTM, PerceiverIO, Transformer:
1. do inference on the video's data with models trained on Calcula
2. load those att_time weights and pass them in to `viz_att` function
* TransformerCLS:
1. do inference on the video's data with transformerCLS model trained on Calcula.
It will be necessary to hand-code the call to get the ALTI contribution weights
2. load those ALTI weights and pass them in to `viz_att` function
"""
print(f'att_time.shape = {att_time.shape}')
if model == 'transformerCLS':
att_time_path = (
f'/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/fairseq-internal/'
f'examples/SL_topic_detection/outputs/att_time_ALTI_transformerCLS_{data_type}_train.pkl'
)
att_time = torch.load(att_time_path)
else:
att_time = att_time[0]
att_time = att_time.squeeze()
print(f'att_time.shape = {att_time.shape}')
if att_time.shape[0] == 1 and len(att_time.shape) == 2:
att_time -= att_time.min(1, keepdim=True)[0]
att_time /= att_time.max(1, keepdim=True)[0]
att_time /= att_time.mean(1, keepdim=True)[0]
elif len(att_time.shape) == 1:
att_time -= att_time.min()
att_time /= att_time.max()
att_time /= att_time.mean()
else:
raise ShapeError('Please pass in a tensor corresponding to just one sample')
# gloss_timestamp = pd.read_csv(csv_path, sep=';')
text_font = ImageFont.truetype('/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/att_viz/train/Verdana.ttf', 33)
if data_type == 'spot_align':
csv_path = '/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/spot_align/train.csv'
mouthings = pd.read_csv(csv_path)
mouthings = mouthings[mouthings['VIDEO_ID'] == '-EsVrbRTMU4']
idx = [5, 9, 19, 28, 35, 45, 63, 75, 80, 86, 101, 105, 107]
words = list(mouthings['TEXT'])[0]
w_frames = list(mouthings['MOUTHING_FRAME'])[0]
words = ast.literal_eval(words)
w_frames = ast.literal_eval(w_frames)
words = [words[i] for i in idx]
w_frames = [w_frames[i] for i in idx]
for i, w, w_f in zip(idx, words, w_frames):
init_frame, end_frame = int(w_f - 10), int(w_f + 10)
print(f'init_frame, end_frame = {init_frame, end_frame}')
init_time, end_time = init_frame / 25, end_frame / 25
video = torchvision.io.read_video(
filename=video_path,
start_pts=init_time,
end_pts=end_time,
pts_unit='sec'
)
frame_rate = video[2]['video_fps']
video = video[0]
print(f'video.shape = {video.shape}')
print('%'*25)
out = []
# define color mapping for the att weights
N = 1000
cmap = cm.get_cmap('plasma', N)
# for each frame in the video range...
for t in range(min(end_frame - init_frame, video.shape[0])):
frame = video[t].permute(2, 0, 1)
frame = F.to_pil_image(frame)
image_editable = ImageDraw.Draw(frame)
image_editable.text(
(15,15), # (0, 0): upper left
w,
(0, 0, 0), # RGB
font=text_font,
)
                # add a surrounding border with the weight's coloring to each frame of the video
color_att = int(N * att_time[i])
colors = tuple(int(255 * c) for c in cmap(color_att)[0:3])
frame = ImageOps.expand(frame, border=66, fill=colors) # (0,0,0))
frame = F.pil_to_tensor(frame).permute(1, 2, 0)
out.append(frame)
# store to .mp4
out = torch.stack(out)
out_path = '/'.join(video_path.split('/')[:-1]) + '/' + f'{data_type}_{model}_viz_att_{init_frame}' + video_path.split('/')[-1]
print(out_path)
torchvision.io.write_video(
filename=out_path,
video_array=out,
fps=frame_rate,
video_codec='libx264',
)
else:
text_timestamps = pd.read_csv(
(
'/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/'
'train_csv_frames_redone_2.csv'
),
sep = '\t'
)
text_timestamps = text_timestamps[text_timestamps['VIDEO_ID'] == '-EsVrbRTMU4']
text_timestamps = text_timestamps.sort_values(by=['START_FRAME'])
if (
data_type in ['keypoints', 'mediapipe_keypoints', 'mediapipe_rotational'] and # TODO: all non-textual feats should have been downsampled!
model == 'transformer'
):
print(f'1 att_time.shape = {att_time.shape}')
att_time = att_time.repeat_interleave(9)
print(f'2 att_time.shape = {att_time.shape}')
elif (
data_type in ['keypoints', 'mediapipe_keypoints', 'rotational', 'mediapipe_rotational', 'i3d',] and
model == 'transformerCLS'
):
print(f'1 att_time.shape = {att_time.shape}')
att_time = torch.from_numpy(att_time).repeat_interleave(9)
print(f'2 att_time.shape = {att_time.shape}')
for i, row in text_timestamps.iterrows():
init_time, end_time = int(row['START_REALIGNED']), int(row['END_REALIGNED'])
init_frame, end_frame = int(row['START_FRAME']), int(row['END_FRAME'])
if init_frame > 1200:
break
if data_type == 'i3d':
init_frame, end_frame = max(0, init_frame - 8), max(0, end_frame - 8)
if end_frame == 0:
end_frame += 8
print(f'init_frame, end_frame = {init_frame, end_frame}')
### add subtitle to time range (init_time, end_time) ###
video = torchvision.io.read_video(
filename=video_path,
start_pts=init_time,
end_pts=end_time,
pts_unit='sec'
)
frame_rate = video[2]['video_fps']
video = video[0]
print(f'video.shape = {video.shape}')
print('%'*25)
out = []
# define color mapping for the att weights
N = 1000
cmap = cm.get_cmap('plasma', N)
# add subtitles...
text = row['SENTENCE']
print(f'len(text) = {len(text)}')
##
words = text.split()
new_text = ""
word_count = 0
for word in words:
new_text += word + " "
word_count += 1
if word_count == 11 or "." in word:
new_text += "\n"
word_count = 0
##
text = new_text
# for each frame in the video range...
for t in range(min(end_frame - init_frame, video.shape[0])):
# print(f't = {t}')
frame = video[t].permute(2, 0, 1)
# print(frame.shape)
frame = F.to_pil_image(frame)
image_editable = ImageDraw.Draw(frame)
image_editable.text(
(15,15), # (0, 0): upper left
text,
(0, 0, 0), # RGB
font=text_font,
)
                # add a surrounding border with the att weight's coloring to each frame of the video
color_att = int(N * att_time[init_frame + t])
colors = tuple(int(255 * c) for c in cmap(color_att)[0:3])
frame = ImageOps.expand(frame, border=66, fill=colors)
frame = F.pil_to_tensor(frame).permute(1, 2, 0)
out.append(frame)
# store to .mp4
out = torch.stack(out)
out_path = '/'.join(video_path.split('/')[:-1]) + '/' + f'{data_type}_{model}_viz_att_{init_frame}' + video_path.split('/')[-1]
print(out_path)
torchvision.io.write_video(
filename=out_path,
video_array=out,
fps=frame_rate,
video_codec='libx264',
)
def obtain_tSNE_projection(
    embeddings: Union[torch.Tensor, np.ndarray],
) -> np.ndarray:
# TODO: set a grid for each of the models (3 in total),
# with 4 x 3 = 12 subplots each (4 data types, 3 dataset splits)
    if isinstance(embeddings, torch.Tensor):
embeddings = embeddings.numpy()
if len(embeddings.shape) != 2:
raise RuntimeError(
            (f'Expected input embeddings to be a two-dimensional tensor'
             f' but got a `{len(embeddings.shape)}`-dimensional tensor instead.')
)
tsne = manifold.TSNE(
n_components=2,
perplexity=30,
early_exaggeration=12,
learning_rate="auto",
n_iter=1000,
random_state=41,
n_jobs=-1,
)
Y = tsne.fit_transform(embeddings)
return Y
def plot_projection(
    Y: np.ndarray,
class_labels: Union[List[int], List[str], torch.Tensor],
labels: List[str],
model: str,
data_type: str,
split: str,
) -> None:
    if isinstance(class_labels, torch.Tensor):
class_labels = deepcopy(class_labels).tolist()
# toy example
# class_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 4]
# x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
# y = [0, 1, 2, 2, 2, 2, 3, 3, 4, 4, 5, 5]
dpi = 100
fig, ax = plt.subplots(figsize=(850/dpi,850/dpi), dpi=dpi)
ax.axis('off')
scatter = plt.scatter(Y[:, 0], Y[:, 1], c=class_labels, cmap='tab10')
# scatter = plt.scatter(x, y, c=class_labels, cmap='tab10')
# lgd = ax.legend(handles=scatter.legend_elements()[0], labels=labels, ncol=1, bbox_to_anchor=(1.04,1))
ax.set_title(f'{model} - {data_type} - {split}')
# plt.savefig(f'./outputs/tsne_{model}_{data_type}_{split}.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig(f'./outputs/tsne_{model}_{data_type}_{split}.png', bbox_inches='tight')
plt.close()
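# A minimal sketch (not part of the original script) of the projection path on
# random embeddings; left commented out since t-SNE is slow on real data.
# emb = torch.randn(100, 64)
# Y = obtain_tSNE_projection(emb)
# plot_projection(
#     Y,
#     class_labels=torch.randint(0, 10, (100,)),
#     labels=[str(i) for i in range(10)],
#     model='dummy', data_type='toy', split='val',
# )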
# loads the mapping from category name to its numeric label, e.g. "beauty" -> 0
def label_num_to_str(
    class_labels: Union[List[int], List[str], torch.Tensor],
    mapping_file: str = '../../../data/How2Sign/categoryName_categoryID.csv',
) -> Dict[str, int]:
    if isinstance(class_labels, torch.Tensor):
class_labels = deepcopy(class_labels).tolist()
mapping = pd.read_csv(mapping_file, index_col=0, squeeze=True, sep=',').to_dict()
assert len(mapping.keys()) == 10
return mapping
def obtain_labels(targets):
mapping = label_num_to_str(targets)
labels = [k for k, v in sorted(mapping.items(), key=lambda item: item[1])]
return labels
def main(args):
MODELS = ['perceiverIO', 'lstm', 'transformer', 'transformerCLS']
DATA_TYPE = ['keypoints', 'mediapipe_keypoints', 'rotational', 'mediapipe_rotational', 'text', 'i3d', 'spot_align']
SPLIT = ['test', 'val', 'train']
for model in MODELS:
for data_type in DATA_TYPE:
for split in SPLIT:
print(f'Analyzing outputs from model = {model}, data type = {data_type}, split = {split}...', flush=True)
print(flush=True)
data = load_data_dict(f'./outputs/inference_{model}_{data_type}_{split}.pt')
# if model == 'transformerCLS':
# data = {'att_time': np.array([[-99999, -99], [-99999, -99]])}
# else:
# data = load_data_dict(f'./outputs/inference_{model}_{data_type}_{split}.pt')
targets = data['targets'] + 1
preds = data['preds'] + 1
logits = data['logits']
labels = obtain_labels(data['targets'])
Y = obtain_tSNE_projection(data['embeddings'])
print(f'Plotting projections; model = {model}, data type = {data_type}, split = {split}...', flush=True)
plot_projection(Y, targets, obtain_labels(data['targets']), model, data_type, split)
print(f'analysis_of_errors; model = {model}, data type = {data_type}, split = {split}...', flush=True)
analysis_of_errors(
targets=targets,
preds=preds,
logits=logits,
labels=labels,
model=model,
data_type=data_type,
split=split,
)
# data_dir = f'../../../../../../data/How2Sign/{data_type}'
# plot_att_time(
# att_time=data['att_time'][0],
# video_id='eSzXQQUgH1A',
# model=model,
# data_type=data_type,
# )
# viz_att(
# att_time=data['att_time'], # TODO: select the data corresponding to the -EsVrbRTMU4 video
# video_path='/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/att_viz/train/-EsVrbRTMU4-8-rgb_front.mp4',
# # csv_path='/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/video/train/-EsVrbRTMU4-8-rgb_front_gloss_timesteps.csv',
# csv_path='/home/alvaro/Documents/ML_and_DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/train_csv_frames_redone_2.csv',
# model=model, # 'lstm',
# data_type=data_type,
# )
print(flush=True)
print(f'Analyzed outputs for model = {model}, data type = {data_type}, split = {split}', flush=True)
print(flush=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parser.parse_args()
main(args)
sign-topic | sign-topic-main/examples/SL_topic_detection/utils.py
import re
from typing import Dict
from typing import List
from typing import Union
from typing import Optional
from pathlib import Path
import csv
from multiprocessing import cpu_count
import h5py
import numpy as np
import pandas as pd
from tqdm import tqdm
import sentencepiece as sp
def save_df_to_tsv(dataframe, path: Union[str, Path]):
_path = path if isinstance(path, str) else path.as_posix()
dataframe.to_csv(
_path,
sep="\t",
header=True,
index=False,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
)
def h5_video2sentence(input_tsv: Path, input_h5: Path, output_h5: Path, overwrite=False):
if not input_tsv.is_file():
raise FileNotFoundError(f"{input_tsv} not found")
if not input_h5.is_file():
raise FileNotFoundError(f"{input_h5} not found")
if output_h5.is_file() and not overwrite:
raise FileExistsError(f"{output_h5} exists. Remove it or set overwrite=True")
df = pd.read_csv(input_tsv, sep='\t')
h5_video = h5py.File(input_h5, 'r')
h5_sent = h5py.File(output_h5, 'w')
for _, r in tqdm(df.iterrows(), total=df.shape[0]):
try:
arr_vid = np.array(h5_video[r["VIDEO_NAME"]])
except KeyError:
print(f"Error with keypoints {r['VIDEO_NAME']}") # FIXME: The error is here, why???
continue
arr_sent = arr_vid[r["START_FRAME"]:r["END_FRAME"]+1]
h5_sent.create_dataset(r["VIDEO_NAME"], data=arr_sent)
h5_video.close()
h5_sent.close()
def natural_keys(text: str):
'''
Used for sorting strings based on natural order.
Alphanumerical ordering: 1, 10, 11, 2, 21...
Natural ordering: 1, 2, 10, 11, 21...
'''
def atof(text):
try:
retval = float(text)
except ValueError:
retval = text
return retval
return [ atof(c) for c in re.split(r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text) ]
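# For example (not part of the original file), this yields natural rather than
# lexicographic order:
# sorted(['clip2', 'clip10', 'clip1'], key=natural_keys)
# -> ['clip1', 'clip2', 'clip10']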
def _groupByClip(dict_text: Dict[str, str]):
sentence_ids = list(dict_text.keys())
sentence_ids.sort(key=natural_keys)
dict_text_video = {}
for utt_id in sentence_ids:
if utt_id[:11] in dict_text_video:
dict_text_video[utt_id[:11]] += dict_text[utt_id].replace('\n', ' ')
else:
dict_text_video[utt_id[:11]] = dict_text[utt_id].replace('\n', ' ')
return dict_text_video
def load_text(file_path: str, ids: List[str], groupByClip: bool = True):
dict_text = {}
with open(file_path) as f:
for line in f:
id, text = line.split(' ', 1) # first space separates id from text
if id[:11] in ids:
dict_text[id] = text
if groupByClip:
dict_text = _groupByClip(dict_text)
return dict_text
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w", encoding="utf-8") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n")
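# A hypothetical invocation (file names are placeholders, not from the
# original file), assuming train.txt holds one sentence per line:
# gen_vocab(
#     Path('train.txt'), Path('spm_unigram8000_en'),
#     model_type='unigram', vocab_size=8000,
# )
# -> trains spm_unigram8000_en.model and writes spm_unigram8000_en.txt,
#    a fairseq-style dictionary with one "<piece> 1" entry per line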
sign-topic | sign-topic-main/examples/SL_topic_detection/infer.py
import ast
import os
import sys
from dataclasses import dataclass, field, is_dataclass
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, Callable
from operator import attrgetter
import torch
import torch.distributed as dist
import hydra
from hydra.core.config_store import ConfigStore
from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
FairseqDataclass,
)
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.logging.progress_bar import BaseProgressBar
from fairseq.models.fairseq_model import FairseqModel
from omegaconf import OmegaConf
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
config_path = Path(__file__).resolve().parent / "config"
@dataclass
class InferConfig(FairseqDataclass):
task: Any = None
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler)
class InferenceProcessor:
cfg: InferConfig
def __init__(self, cfg: InferConfig) -> None:
self.cfg = cfg
self.cfg.task.data = os.environ.get('DATA', cfg.task.data)
self.cfg.task.dict_path = os.environ.get('DICT_PATH', cfg.task.dict_path)
self.cfg.task.feats_type = os.environ.get('FEATS_TYPE', cfg.task.feats_type)
self.cfg.common_eval.path = os.environ.get('MODEL_PATH', cfg.common_eval.path)
self.cfg.bpe.sentencepiece_model = os.environ.get('SP_MODEL', cfg.bpe.sentencepiece_model)
self.cfg.hooks.out_file = os.environ.get('OUTPUTS_FILE', self.cfg.hooks.out_file)
self.task = tasks.setup_task(cfg.task)
self.att_time = []
self.embeddings = []
self.logits = []
model, saved_cfg = self.load_model()
self.targets = []
self.preds = []
self.model = model
if torch.cuda.is_available():
model.cuda()
self.saved_cfg = saved_cfg
self.src_dict = self.task.source_dictionary # this is None except when cfg.feats_type == 'text'
self.tgt_dict = self.task.target_dictionary # this is always None
self.cfg.dataset.dataset_split = os.environ.get('DATASET_SPLIT', self.cfg.dataset.dataset_split)
self.task.load_dataset(
self.cfg.dataset.dataset_split,
task_cfg=saved_cfg.task,
)
self.inference_timer = StopwatchMeter()
self.wps_meter = TimeMeter()
self.frames = 0
self.total_counts = 0
self.total_totals = 0
self.progress_bar = self.build_progress_bar()
def __enter__(self) -> "InferenceProcessor":
logger.info(
"num. shared model params: {:,} (num. trained: {:,})".format(
sum(
p.numel() for p in self.model.parameters() if not getattr(p, "expert", False)
),
sum(
p.numel()
for p in self.model.parameters()
if not getattr(p, "expert", False) and p.requires_grad
),
)
)
logger.info(
"num. expert model params: {} (num. trained: {})".format(
sum(p.numel() for p in self.model.parameters() if getattr(p, "expert", False)),
sum(
p.numel()
for p in self.model.parameters()
if getattr(p, "expert", False) and p.requires_grad
),
)
)
return self
def __exit__(self, *exc) -> bool:
# TODO: right before exiting, the attention maps and embeddings in self.att_time and self.embeddings should be stored in disk
# store targets, embeddings and attention weights for later usage (e.g. visualization)
outputs = {}
self.targets = torch.cat(self.targets, 0).squeeze()
outputs['targets'] = self.targets
self.preds = torch.cat(self.preds, 0).squeeze()
outputs['preds'] = self.preds
if self.cfg.hooks.embedding:
self.embeddings = torch.cat(self.embeddings, 0).squeeze()
outputs['embeddings'] = self.embeddings
if self.cfg.hooks.attention:
outputs['att_time'] = self.att_time
if self.cfg.hooks.logits:
self.logits = torch.cat(self.logits, 0).squeeze()
outputs['logits'] = self.logits
torch.save(outputs, self.cfg.hooks.out_file)
return False
def __iter__(self) -> Any:
for sample in self.progress_bar:
if not self.cfg.common.cpu or torch.cuda.is_available():
sample = utils.move_to_cuda(sample)
# Happens on the last batch.
if "net_input" not in sample:
continue
yield sample
def log(self, *args, **kwargs):
self.progress_bar.log(*args, **kwargs)
def print(self, *args, **kwargs):
self.progress_bar.print(*args, **kwargs)
def register_hooks(self, model: FairseqModel):
def get_input(container: List) -> Callable:
def hook(module, input):
container.append(input[0].detach())
return hook
def get_logits(container: List) -> Callable:
softm = torch.nn.Softmax(dim=1)
def hook(module, input, output):
logits = output.squeeze() if len(output.shape) > 2 else output
logits = logits.unsqueeze(0) if len(logits.shape) == 1 else logits
logits = softm(logits)
container.append(logits.detach())
return hook
# used for retrieving att over final embeddings in LSTM and transformer
def get_output_att1(container: List) -> Callable:
def hook(module, input, output):
container.append(output[1].detach())
return hook
# used for retrieving encoder cross-attention in PerceiverIO
def get_output_crossatt_PerceiverIO(container: List) -> Callable:
def hook(module, input, output):
att = output[3][0].detach()
if len(att.shape) != 4:
raise RuntimeError(
                        (f'Expected the attention weights to be a four-dimensional tensor'
                         f' but got a `{len(att.shape)}`-dimensional tensor instead.')
)
att = torch.mean(att, (1, 2))
container.append(att)
return hook
if self.cfg.hooks.attention: # register a hook to retrieve attention maps over the input sequence
# TODO: set these layers
layers_attention_time = {
'PerceiverModel': 'encoder',
'Sign2TextTransformerModel': 'att_encoder',
'SLTopicDetectionLSTMModel': 'att_encoder',
}
hooks_time = {
'PerceiverModel': get_output_crossatt_PerceiverIO,
'Sign2TextTransformerModel': get_output_att1,
'SLTopicDetectionLSTMModel': get_output_att1,
}
# TODO: decide how to store these results. Should they be stored in a variable and then to disk after passing through the whole dataset?
# or should they rather be stored one-by-one/batch-by-batch?
model_class = model.__class__.__name__
if model_class == 'Sign2TextTransformerModel_CLS':
raise AttributeError('Cannot visualize attention with this script for model class `Sign2TextTransformerModel_CLS`.')
retriever = attrgetter(layers_attention_time[model_class])
retriever(model).register_forward_hook(hooks_time[model_class](self.att_time))
if self.cfg.hooks.embedding: # register hook to retrieve embeddings produced at the last layer before the classification head
layers_embedding = {
'PerceiverModel': 'decoder.decoder.decoding_cross_attention.attention.output',
'Sign2TextTransformerModel': 'classif_head',
'Sign2TextTransformerModel_CLS': 'classif_head', # TODO: hook for retrieving CLS token
'SLTopicDetectionLSTMModel': 'classif_head',
}
# for name, layer in model.named_modules():
# print(name, layer)
# print(f'model.__class__.__name__ {model.__class__.__name__}')
retriever = attrgetter(layers_embedding[model.__class__.__name__])
retriever(model).register_forward_pre_hook(get_input(self.embeddings))
if self.cfg.hooks.logits:
layers_logits = {
'PerceiverModel': 'decoder.decoder.final_layer',
'Sign2TextTransformerModel': 'classif_head',
'Sign2TextTransformerModel_CLS': 'classif_head',
'SLTopicDetectionLSTMModel': 'classif_head',
}
retriever = attrgetter(layers_logits[model.__class__.__name__])
retriever(model).register_forward_hook(get_logits(self.logits))
def load_model(self) -> Tuple[FairseqModel, FairseqDataclass]:
arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides)
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(self.cfg.common_eval.path, separator="\\"),
arg_overrides=arg_overrides,
task=self.task,
suffix=self.cfg.checkpoint.checkpoint_suffix,
strict=(self.cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=self.cfg.checkpoint.checkpoint_shard_count,
)
logger.info(models[0])
self.register_hooks(models[0])
return models[0], saved_cfg
def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None:
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.dataset_split),
max_tokens=self.cfg.dataset.max_tokens,
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
).next_epoch_itr(shuffle=False)
def build_progress_bar(
self,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default_log_format: str = "tqdm",
) -> BaseProgressBar:
return progress_bar.progress_bar(
iterator=self.get_dataset_itr(),
log_format=self.cfg.common.log_format,
log_interval=self.cfg.common.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=self.cfg.common.tensorboard_logdir,
default_log_format=default_log_format,
)
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
def process_sample(self, sample: Dict[str, Any]) -> None:
self.inference_timer.start()
counts, total = self.task.inference_step(
sample,
self.model,
output_attentions=(self.model.__class__.__name__=='PerceiverModel' and self.cfg.hooks.attention),
targets_container=self.targets,
preds_container=self.preds,
)
self.inference_timer.stop(total)
self.total_counts += counts
self.total_totals += total
self.frames = max(sample['net_input']['src_tokens'].shape[1], self.frames)
def log_generation_time(self) -> None:
        logger.info(
            "Processed %d samples (the longest one having %d frames) in %.1fs"
            " (%.2f frames per second, %.2f samples per second)",
self.inference_timer.n,
self.frames,
self.inference_timer.sum,
self.frames * self.inference_timer.n / self.inference_timer.sum,
1.0 / self.inference_timer.avg * self.frames,
)
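# The capture mechanism in register_hooks above is the standard torch
# forward-hook pattern; a self-contained illustration (not part of this
# script, shown for reference only):
# container = []
# layer = torch.nn.Linear(4, 2)
# layer.register_forward_hook(
#     lambda module, inputs, output: container.append(output.detach())
# )
# layer(torch.randn(1, 4))
# container[0] then holds the layer's output, analogous to self.logits above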
def parse_acc(wer_file: Path) -> float:
with open(wer_file, "r") as f:
return float(f.readline().strip().split(" ")[1])
def get_acc_file(cfg: InferConfig) -> Path:
"""Hashes the decoding parameters to a unique file ID."""
base_path = "acc"
return Path(base_path)
def main(cfg: InferConfig) -> float:
    """Entry point for main processing logic.
    Args:
        cfg: The inference configuration to use.
    Returns:
        The final accuracy.
    """
acc_file = get_acc_file(cfg)
# Validates the provided configuration.
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 20000
if not cfg.common.cpu and not torch.cuda.is_available():
raise ValueError("CUDA not found; set `cpu=True` to run without CUDA")
with InferenceProcessor(cfg) as processor:
i = 1
for sample in processor:
processor.process_sample(sample)
i += 1
processor.log_generation_time()
counts_t, totals_t = processor.total_counts, processor.total_totals
if cfg.common.cpu:
logger.warning("Merging Accuracy requires CUDA.")
        if not isinstance(counts_t, int) or not isinstance(totals_t, int):
            raise RuntimeError(
                (f'counts of samples of type `{type(counts_t), type(totals_t)}`'
                 ' are not of type `int`')
            )
elif processor.data_parallel_world_size > 1:
stats = torch.LongTensor([counts_t, totals_t]).cuda()
dist.all_reduce(stats, op=dist.ReduceOp.SUM)
counts_t, totals_t = stats[0].item(), stats[1].item()
acc = counts_t * 100.0 / totals_t
if distributed_utils.is_master(cfg.distributed_training):
with open(acc_file, "w") as f:
f.write(
(
f"Accuracy: {acc}\n"
f"counts / total = {counts_t} / {totals_t}\n\n"
)
)
return acc
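# A hypothetical way (not part of the original script) to set the environment
# variables that InferenceProcessor.__init__ reads before launching hydra_main:
# import os
# os.environ.update({
#     'DATA': '/path/to/data',
#     'DICT_PATH': '/path/to/dict.txt',
#     'FEATS_TYPE': 'text',
#     'MODEL_PATH': '/path/to/checkpoint_best.pt',
#     'SP_MODEL': '/path/to/spm.model',
#     'OUTPUTS_FILE': './outputs/inference_model_text_test.pt',
#     'DATASET_SPLIT': 'test',
#     'CONFIG_NAME': 'infer',
# })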
@hydra.main(config_path=config_path, config_name=os.environ['CONFIG_NAME']) # TODO: set this systematically
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
reset_logging()
logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))
acc = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
acc = parse_acc(get_acc_file(cfg))
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
logger.info("Accuracy: %.4f", acc)
if cfg.is_ax:
return acc, None
return acc
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
print(f'cfg_name = {cfg_name}')
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
cli_main()
sign-topic | sign-topic-main/examples/SL_topic_detection/prep_how2sign.py
#!/usr/bin/env python3
import errno
import os
import json
import h5py
import argparse
import logging
import pandas as pd
from typing import Tuple
from typing import List
from pathlib import Path
from tempfile import NamedTemporaryFile
import cv2
import torchvision
import torch
from torch.utils.data import Dataset
from utils import (
gen_vocab,
save_df_to_tsv,
load_text,
)
log = logging.getLogger(__name__)
class How2Sign(Dataset):
'''
Create a Dataset for How2Sign.
'''
LANGUAGES = ['en'] # TODO: add 'pt'
SPLITS = ['val', 'test', 'train']
def __init__(
self,
root: str,
lang: str,
split: str,
featsType: str,
) -> None:
self.root = Path(root)
self.featsType = featsType
assert split in self.SPLITS and lang in self.LANGUAGES
assert self.root.is_dir()
try:
self.h5_sign = h5py.File(self.root / f'{split}.h5', 'r')
        except OSError:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), self.root / f'{split}.h5'
)
with h5py.File(self.root / f'{split}_filt.h5', 'w') as f:
for key in self.h5_sign.keys():
try:
f[key[:11]] = self.h5_sign[key][()]
                except Exception:
pass
self.h5_sign.close()
self.h5_sign = h5py.File(self.root / f'{split}_filt.h5', 'r')
if featsType == 'text':
self.text = load_text(self.root / f'{split}.txt', list(self.h5_sign.keys()))
elif featsType == 'spot_align':
self.categs = pd.read_csv(self.root / f'{split}_categs.csv')
self.data = pd.read_csv(self.root / f'{split}.csv')
if featsType == 'text':
self.data['TEXT'] = pd.NaT
elif featsType == 'spot_align':
self.data['CATEGORY_ID'] = pd.NaT
self.data['START_FRAME'] = pd.NaT
self.data['END_FRAME'] = pd.NaT
for i, row in self.data.iterrows():
if row['VIDEO_ID'] not in list(self.h5_sign.keys()):
print(f'Error with keypoint {row["VIDEO_ID"]}, not found inside h5_sign', flush=True)
self.data.drop(i, inplace=True)
else:
self.data.loc[i, 'START_FRAME'] = 0
self.data.loc[i, 'END_FRAME'] = torch.Tensor(self.h5_sign[row['VIDEO_ID']]).shape[0]
if featsType == 'text':
self.data.loc[i, 'TEXT'] = self.text[row['VIDEO_ID']]
elif featsType == 'spot_align':
self.data.loc[i, 'CATEGORY_ID'] = self.categs[self.categs['VIDEO_ID'] == row['VIDEO_ID']]['CATEGORY_ID'].tolist()[0]
self.data.reset_index(drop=True, inplace=True)
def __getitem__(self, n: int) -> Tuple[torch.Tensor, str, str]:
sent_id = self.data.loc[n, 'VIDEO_ID']
src_signs = torch.Tensor(self.h5_sign[sent_id])
categ = self.data.loc[n, 'CATEGORY_ID']
if self.featsType in ['text', 'spot_align']:
text = self.data.loc[n, 'TEXT']
return sent_id, src_signs, text, categ
return sent_id, src_signs, categ
def __len__(self) -> int:
return len(self.data)
def filter_by_length(self, min_n_frames: int, max_n_frames: int) -> None:
lengths = self.data['END_FRAME'] - self.data['START_FRAME'] + 1
self.data = self.data[lengths.between(min_n_frames, max_n_frames)]
self.data.reset_index(drop=True, inplace=True)
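# Hypothetical usage (not part of the original script), assuming val.h5 and
# val.csv exist under data_root:
# ds = How2Sign('data/How2Sign', lang='en', split='val', featsType='keypoints')
# sent_id, src_signs, categ = ds[0]  # src_signs: (n_frames, feat_dim) tensor
# ds.filter_by_length(min_n_frames=150, max_n_frames=5500)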
class How2Sign_video(Dataset):
'''
Create a Dataset for How2Sign for video data.
'''
LANGUAGES = ['en'] # TODO: add 'pt'
SPLITS = ['train', 'val', 'test']
DATA_PATH = {
'train': '/mnt/gpid08/datasets/How2Sign/How2Sign/video_level/train/rgb_front/raw_videos',
'val': '/mnt/gpid08/datasets/How2Sign/How2Sign/video_level/val/rgb_front/raw_videos',
'test': '/mnt/gpid08/datasets/How2Sign/How2Sign/video_level/test/rgb_front/raw_videos',
}
# DATA_PATH = {
# 'train': './',
# 'val': './',
# 'test': './',
# }
def __init__(
self,
root: str,
lang: str,
split: str,
featsType: str,
) -> None:
self.root = Path(root)
self.featsType = featsType
assert split in self.SPLITS and lang in self.LANGUAGES
assert self.root.is_dir()
self.split = split
self.data = []
self.videonames = self.load_video_names(os.path.join(self.root, 'subset2episode.json'))[split]
# self.videonames = ['cKmtmtqeUkI-5-rgb_front', 'g1uA0f9I0Sg-5-rgb_front']
self.categories = pd.read_csv(self.root / f'{split}.csv')
self.categories = self.categories.set_index('VIDEO_ID').to_dict()['CATEGORY_ID']
# self.categories = {'cKmtmtqeUkI': 4, 'g1uA0f9I0Sg': 2}
for i, video_name in enumerate(self.videonames):
cap = cv2.VideoCapture(os.path.join(self.DATA_PATH[split], video_name + '.mp4'))
# cap = cv2.VideoCapture('/home/alvaro/Documents/ML and DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/video/cKmtmtqeUkI-5-rgb_front.mp4')
totalframecount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
print("The total number of frames in this video is ", totalframecount, flush=True)
self.data.append({
'VIDEO_ID': video_name[:11],
'VIDEO_NAME': video_name,
'CATEGORY_ID': self.categories[video_name[:11]],
'START_FRAME': 0,
'END_FRAME': totalframecount - 1
})
self.data = pd.DataFrame(self.data)
self.data = self.data.drop_duplicates(subset=['VIDEO_ID'], keep='first')
self.data = self.data.drop_duplicates(subset=['VIDEO_NAME'], keep='first')
self.data.reset_index(drop=True, inplace=True)
def load_video_names(self, path: str) -> List[str]:
with open(path, 'r') as f:
data_features = json.load(f)
return data_features
def __getitem__(self, n: int) -> Tuple[torch.Tensor, str, str]:
sent_id = self.data.loc[n, 'VIDEO_ID']
src_signs = torchvision.io.read_video(filename=os.path.join(self.DATA_PATH[self.split], self.data.loc[n, 'VIDEO_NAME'] + '.mp4'))
categ = self.data.loc[n, 'CATEGORY_ID']
return sent_id, src_signs, categ
def __len__(self) -> int:
return len(self.data)
def filter_by_length(self, min_n_frames: int, max_n_frames: int) -> None:
lengths = self.data['END_FRAME'] - self.data['START_FRAME'] + 1
self.data = self.data[lengths.between(min_n_frames, max_n_frames)]
def process(args):
root = Path(args.data_root).absolute()
for split in How2Sign.SPLITS:
print(f'Processing "{split}" split', flush=True)
filt_csv = root / f'{split}_filt.csv'
for lang in How2Sign.LANGUAGES:
if args.featsType == 'video':
dataset = How2Sign_video(root, lang, split, args.featsType)
else:
dataset = How2Sign(root, lang, split, args.featsType)
print('Filtering samples by length...', flush=True)
dataset.filter_by_length(args.min_n_frames, args.max_n_frames)
print(f'{len(dataset)} samples remaining after filtering', flush=True)
if split == 'train' and args.featsType in ['text', 'spot_align']:
print(f"Generating vocab for '{lang}' language")
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{lang}"
with NamedTemporaryFile(mode="w") as f:
for i in range(len(dataset)):
f.write(dataset[i][2] + "\n")
f.seek(0)
gen_vocab(
Path(f.name),
root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
special_symbols=['_', '-']
)
print('Saving dataframe...', flush=True)
save_df_to_tsv(dataset.data, filt_csv)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data-root', '-d', required=True, type=str)
parser.add_argument('--min-n-frames', default=150, type=int)
parser.add_argument('--max-n-frames', default=5500, type=int)
parser.add_argument('--featsType', default='keypoints', type=str)
parser.add_argument(
"--vocab-type",
default="unigram",
type=str,
choices=["bpe", "unigram", "char"],
)
parser.add_argument("--vocab-size", default=8000, type=int)
args = parser.parse_args()
process(args)
if __name__ == '__main__':
main()
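# A hypothetical invocation (paths are placeholders, not from the original
# script):
# python prep_how2sign.py --data-root data/How2Sign --featsType text \
#     --vocab-type unigram --vocab-size 8000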
sign-topic | sign-topic-main/examples/sign2vec/utils.py
import h5py
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
def h5_video2sentence(input_tsv: Path, input_h5: Path, output_h5: Path, overwrite=False):
if not input_tsv.is_file():
raise FileNotFoundError(f"{input_tsv} not found")
if not input_h5.is_file():
raise FileNotFoundError(f"{input_h5} not found")
if output_h5.is_file() and not overwrite:
raise FileExistsError(f"{output_h5} exists. Remove it or set overwrite=True")
df = pd.read_csv(input_tsv, sep='\t')
h5_video = h5py.File(input_h5, 'r')
h5_sent = h5py.File(output_h5, 'w')
for _, r in tqdm(df.iterrows(), total=df.shape[0]):
try:
arr_vid = np.array(h5_video[r["VIDEO_NAME"]])
except KeyError:
print(f"Error with keypoints {r['VIDEO_NAME']}") #The error is here, why???
continue
print(f'arr_vid.shape: {arr_vid.shape}')
arr_sent = arr_vid[r["START_FRAME"]:r["END_FRAME"]+1]
h5_sent.create_dataset(r["SENTENCE_NAME"], data=arr_sent)
h5_video.close()
h5_sent.close()
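# A hypothetical call (paths are placeholders): slices video-level features
# into sentence-level clips using the frame spans listed in the tsv, which is
# expected to provide VIDEO_NAME, SENTENCE_NAME, START_FRAME and END_FRAME.
# h5_video2sentence(
#     Path('train.tsv'),
#     Path('train.h5'),
#     Path('train_sent.h5'),
#     overwrite=True,
# )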
sign-topic | sign-topic-main/examples/sign2vec/prep_how2sign.py
#!/usr/bin/env python3
# This code is based on the speech_to_text implementation (commit: d974c70)
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import h5py
import argparse
import logging
import pandas as pd
from typing import Tuple
from pathlib import Path
from tempfile import NamedTemporaryFile
import torch
from torch.utils.data import Dataset
from examples.speech_to_text.data_utils import (
gen_vocab,
save_df_to_tsv,
)
from utils import h5_video2sentence
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "signs", "n_frames", "tgt_text"]
class How2Sign(Dataset):
"""
Create a Dataset for How2Sign. Each item is a tuple of the form:
signs, target sentence
"""
LANGUAGES = ["en"] # TODO: add "pt"
SPLITS = ["train", "val", "test"]
def __init__(self, root: str, lang: str, split: str) -> None:
self.root = Path(root)
assert split in self.SPLITS and lang in self.LANGUAGES
assert self.root.is_dir()
self.h5_sign = h5py.File(self.root / f"{split}_sent.h5", "r")
self.data = pd.read_csv(self.root / f"{split}.tsv", sep="\t")
for i, row in self.data.iterrows():
            # not finding anything in here... why??
            # pdb.set_trace()
if row['SENTENCE_NAME'] not in list(self.h5_sign.keys()):
print(f"Error with keypoint {row['SENTENCE_NAME']}, not found inside h5_sign")
self.data.drop(i, inplace=True)
self.data.reset_index(drop=True, inplace=True)
def __getitem__(self, n: int) -> Tuple[torch.Tensor, str, str]:
sent_id = self.data.loc[n, 'SENTENCE_NAME']
src_signs = torch.Tensor(self.h5_sign[sent_id])
tgt_sent = self.data.loc[n, 'SENTENCE']
return sent_id, src_signs, tgt_sent
def __len__(self) -> int:
return len(self.data)
def filter_by_length(self, min_n_frames: int, max_n_frames: int) -> None:
lengths = self.data['END_FRAME'] - self.data['START_FRAME'] + 1
self.data = self.data[lengths.between(min_n_frames, max_n_frames)]
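# Example usage of the dataset above (hypothetical data root; expects
# <root>/<split>_sent.h5 and <root>/<split>.tsv as produced by process()):
#   dataset = How2Sign('/path/to/how2sign', lang='en', split='train')
#   dataset.filter_by_length(min_n_frames=5, max_n_frames=1000)
#   sent_id, src_signs, tgt_sent = dataset[0]  # src_signs: (n_frames, feat_dim) Tensor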
def process(args):
root = Path(args.data_root).absolute()
for split in How2Sign.SPLITS:
print(f"Processing '{split}' split")
input_tsv = root / f"{split}.tsv"
filt_tsv = root / f"{split}_filt.tsv"
if args.data_type == 'skeletons':
signs_video = root / f"{split}.h5"
signs_sentence = root / f"{split}_sent.h5"
elif args.data_type == 'i3d':
signs_video = root / f"{split}_i3d.h5"
signs_sentence = root / f"{split}_i3d_sent.h5"
        else:
            raise ValueError(f"Unsupported --data-type '{args.data_type}', expected 'skeletons' or 'i3d'")
try:
h5_video2sentence(input_tsv, signs_video, signs_sentence, overwrite=args.overwrite)
except FileNotFoundError:
print(f"Skipping '{split}' split")
continue
except FileExistsError:
print(f"Reusing sentence-level h5 for '{split}' split. Set --overwrite to overwrite it.")
print(f'signs_video: {signs_video}, signs_sentence: {signs_sentence}')
for lang in How2Sign.LANGUAGES:
dataset = How2Sign(root, lang, split)
if split == 'train':
'''
print(f"Generating vocab for '{lang}' language")
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{lang}"
with NamedTemporaryFile(mode="w") as f:
for i in range(len(dataset)):
f.write(dataset[i][2] + "\n")
gen_vocab(
Path(f.name),
root / spm_filename_prefix,
args.vocab_type,
args.vocab_size,
)
'''
print("Filtering samples by length...")
dataset.filter_by_length(args.min_n_frames, args.max_n_frames)
print(f"{len(dataset)} samples after filtering")
print("Saving dataframe...")
save_df_to_tsv(dataset.data, filt_tsv)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-root", "-d", required=True, type=str)
parser.add_argument("--min-n-frames", default=5, type=int)
parser.add_argument("--max-n-frames", default=1000, type=int)
parser.add_argument(
"--vocab-type",
default="unigram",
type=str,
choices=["bpe", "unigram", "char"],
)
parser.add_argument("--data-type", default='skeletons', type=str)
parser.add_argument("--vocab-size", default=8000, type=int)
parser.add_argument("--overwrite", action="store_true")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
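# Example invocation (paths are placeholders; the flags match the parser above):
#   python examples/sign2vec/prep_how2sign.py \
#       --data-root /path/to/how2sign --data-type skeletons \
#       --min-n-frames 5 --max-n-frames 1000 --overwrite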
| 5,013 | 34.062937 | 101 |
py
|
sign-topic
|
sign-topic-main/examples/fast_noisy_channel/noisy_channel_translation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.tasks.translation import TranslationTask
from fairseq.tasks.language_modeling import LanguageModelingTask
from fairseq import checkpoint_utils
import argparse
from fairseq.tasks import register_task
import torch
@register_task("noisy_channel_translation")
class NoisyChannelTranslation(TranslationTask):
"""
Rescore the top k candidates from each beam using noisy channel modeling
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
TranslationTask.add_args(parser)
# fmt: off
parser.add_argument('--channel-model', metavar='FILE',
help='path to P(S|T) model. P(S|T) and P(T|S) must share source and target dictionaries.')
parser.add_argument('--combine-method', default='lm_only',
choices=['lm_only', 'noisy_channel'],
help="""method for combining direct and channel model scores.
lm_only: decode with P(T|S)P(T)
noisy_channel: decode with 1/t P(T|S) + 1/s(P(S|T)P(T))""")
parser.add_argument('--normalize-lm-scores-by-tgt-len', action='store_true', default=False,
help='normalize lm score by target length instead of source length')
parser.add_argument('--channel-scoring-type', default='log_norm', choices=['unnormalized', 'log_norm', 'k2_separate', 'src_vocab', 'src_vocab_batched'],
help="Normalize bw scores with log softmax or return bw scores without log softmax")
parser.add_argument('--top-k-vocab', default=0, type=int,
help='top k vocab IDs to use with `src_vocab` in channel model scoring')
parser.add_argument('--k2', default=50, type=int,
help='the top k2 candidates to rescore with the noisy channel model for each beam')
parser.add_argument('--ch-wt', default=1, type=float,
help='weight for the channel model')
parser.add_argument('--lm-model', metavar='FILE',
help='path to lm model file, to model P(T). P(T) must share the same vocab as the direct model on the target side')
parser.add_argument('--lm-data', metavar='FILE',
help='path to lm model training data for target language, used to properly load LM with correct dictionary')
parser.add_argument('--lm-wt', default=1, type=float,
help='the weight of the lm in joint decoding')
# fmt: on
def build_generator(
self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None
):
if getattr(args, "score_reference", False):
raise NotImplementedError()
else:
from .noisy_channel_sequence_generator import NoisyChannelSequenceGenerator
use_cuda = torch.cuda.is_available() and not self.args.cpu
assert self.args.lm_model is not None, '--lm-model required for noisy channel generation!'
assert self.args.lm_data is not None, '--lm-data required for noisy channel generation to map between LM and bitext vocabs'
if self.args.channel_model is not None:
import copy
ch_args_task = copy.deepcopy(self.args)
tmp = ch_args_task.source_lang
ch_args_task.source_lang = ch_args_task.target_lang
ch_args_task.target_lang = tmp
ch_args_task._name = 'translation'
channel_task = TranslationTask.setup_task(ch_args_task)
arg_dict = {}
arg_dict['task'] = 'language_modeling'
arg_dict['sample_break_mode'] = 'eos'
arg_dict['data'] = self.args.lm_data
arg_dict['output_dictionary_size'] = -1
lm_args = argparse.Namespace(**arg_dict)
lm_task = LanguageModelingTask.setup_task(lm_args)
lm_dict = lm_task.output_dictionary
if self.args.channel_model is not None:
channel_models, _ = checkpoint_utils.load_model_ensemble(self.args.channel_model.split(':'), task=channel_task)
for model in channel_models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if self.args.fp16:
model.half()
if use_cuda:
model.cuda()
else:
channel_models = None
lm_models, _ = checkpoint_utils.load_model_ensemble(self.args.lm_model.split(':'), task=lm_task)
for model in lm_models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if self.args.fp16:
model.half()
if use_cuda:
model.cuda()
return NoisyChannelSequenceGenerator(
combine_method=self.args.combine_method,
tgt_dict=self.target_dictionary,
src_dict=self.source_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
temperature=getattr(args, 'temperature', 1.),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
normalize_scores=(not getattr(args, 'unnormalized', False)),
channel_models=channel_models,
k2=getattr(self.args, 'k2', 50),
ch_weight=getattr(self.args, 'ch_wt', 1),
channel_scoring_type=self.args.channel_scoring_type,
top_k_vocab=self.args.top_k_vocab,
lm_models=lm_models,
lm_dict=lm_dict,
lm_weight=getattr(self.args, 'lm_wt', 1),
normalize_lm_scores_by_tgt_len=getattr(self.args, 'normalize_lm_scores_by_tgt_len', False),
)
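# Example generation command (paths and shell variables are placeholders; the
# task-specific flags are the ones registered in add_args above):
#   fairseq-generate data-bin/wmt --task noisy_channel_translation \
#       --path $direct_model \
#       --channel-model $channel_model --ch-wt 1 \
#       --lm-model $lm_model --lm-data data-bin/lm --lm-wt 1 \
#       --combine-method noisy_channel --k2 10 --beam 5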
| 6,709 | 51.421875 | 160 |
py
|
sign-topic
|
sign-topic-main/examples/fast_noisy_channel/noisy_channel_beam_search.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.search import Search
class NoisyChannelBeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.fw_scores_buf = None
self.lm_scores_buf = None
def _init_buffers(self, t):
# super()._init_buffers(t)
if self.fw_scores_buf is None:
self.scores_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
self.beams_buf = torch.LongTensor().to(device=t.device)
self.fw_scores_buf = t.new()
self.lm_scores_buf = t.new()
    def combine_fw_bw(self, combine_method, fw_cum, bw, step):
        if combine_method == "noisy_channel":
            # length-normalize the cumulative forward (direct-model) score
            fw_norm = fw_cum.div(step + 1)
            lprobs = bw + fw_norm
        elif combine_method == "lm_only":
            lprobs = bw + fw_cum
        else:
            raise ValueError(f"unknown combine_method: {combine_method}")
        return lprobs
def step(self, step, fw_lprobs, scores, bw_lprobs, lm_lprobs, combine_method):
self._init_buffers(fw_lprobs)
bsz, beam_size, vocab_size = fw_lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
fw_lprobs = fw_lprobs[:, ::beam_size, :].contiguous()
bw_lprobs = bw_lprobs[:, ::beam_size, :].contiguous()
# nothing to add since we are at the first step
fw_lprobs_cum = fw_lprobs
else:
# make probs contain cumulative scores for each hypothesis
raw_scores = (scores[:, :, step - 1].unsqueeze(-1))
fw_lprobs_cum = (fw_lprobs.add(raw_scores))
combined_lprobs = self.combine_fw_bw(combine_method, fw_lprobs_cum, bw_lprobs, step)
# choose the top k according to the combined noisy channel model score
torch.topk(
combined_lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
combined_lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
out=(self.scores_buf, self.indices_buf),
)
# save corresponding fw and lm scores
self.fw_scores_buf = torch.gather(fw_lprobs_cum.view(bsz, -1), 1, self.indices_buf)
self.lm_scores_buf = torch.gather(lm_lprobs.view(bsz, -1), 1, self.indices_buf)
# Project back into relative indices and beams
self.beams_buf = self.indices_buf // vocab_size
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.fw_scores_buf, self.lm_scores_buf, self.indices_buf, self.beams_buf
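# Toy sketch of the combination in combine_fw_bw (illustrative values):
# with cumulative direct-model scores fw_cum and combined channel/LM scores bw,
# "noisy_channel" length-normalizes the forward term before adding:
#   fw_cum = torch.tensor([[-2.0, -4.0]])  # cumulative log P(T|S) at step=1
#   bw = torch.tensor([[-1.0, -3.0]])      # channel + LM log-scores
#   fw_cum.div(1 + 1) + bw                 # -> tensor([[-2.0, -5.0]])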
| 2,895 | 39.222222 | 104 |
py
|
sign-topic
|
sign-topic-main/examples/fast_noisy_channel/noisy_channel_sequence_generator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from .noisy_channel_beam_search import NoisyChannelBeamSearch
from fairseq.sequence_generator import EnsembleModel
class NoisyChannelSequenceGenerator(object):
def __init__(
self,
combine_method,
tgt_dict,
src_dict=None,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
len_penalty=1.0,
unk_penalty=0.0,
retain_dropout=False,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
normalize_scores=True,
channel_models=None,
k2=10,
ch_weight=1.0,
channel_scoring_type='log_norm',
top_k_vocab=0,
lm_models=None,
lm_dict=None,
lm_weight=1.0,
normalize_lm_scores_by_tgt_len=False,
):
"""Generates translations of a given source sentence,
using beam search with noisy channel decoding.
Args:
combine_method (string, optional): Method to combine direct, LM and
channel model scores (default: None)
tgt_dict (~fairseq.data.Dictionary): target dictionary
src_dict (~fairseq.data.Dictionary): source dictionary
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
retain_dropout (bool, optional): use dropout when generating
(default: False)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
no_repeat_ngram_size (int, optional): Size of n-grams that we avoid
repeating in the generation (default: 0)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
channel_models (List[~fairseq.models.FairseqModel]): ensemble of models
translating from the target to the source
k2 (int, optional): Top K2 candidates to score per beam at each step (default:10)
ch_weight (int, optional): Weight associated with the channel model score
assuming that the direct model score has weight 1.0 (default: 1.0)
channel_scoring_type (str, optional): String specifying how to score
the channel model (default: 'log_norm')
top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or
`'src_vocab_batched'`, then this parameter specifies the number of
most frequent tokens to include in the channel model output vocabulary,
in addition to the source tokens in the input batch (default: 0)
lm_models (List[~fairseq.models.FairseqModel]): ensemble of models
generating text in the target language
lm_dict (~fairseq.data.Dictionary): LM Model dictionary
lm_weight (int, optional): Weight associated with the LM model score
assuming that the direct model score has weight 1.0 (default: 1.0)
normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores
by the target length? By default, we normalize the combination of
LM and channel model scores by the source length
"""
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
self.channel_models = channel_models
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.combine_method = combine_method
self.k2 = k2
self.ch_weight = ch_weight
self.channel_scoring_type = channel_scoring_type
self.top_k_vocab = top_k_vocab
self.lm_models = lm_models
self.lm_dict = lm_dict
self.lm_weight = lm_weight
self.log_softmax_fn = torch.nn.LogSoftmax(dim=1)
self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len
self.share_tgt_dict = (self.lm_dict == self.tgt_dict)
self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict)
self.ch_scoring_bsz = 3072
assert temperature > 0, '--temperature must be greater than 0'
self.search = NoisyChannelBeamSearch(tgt_dict)
@torch.no_grad()
def generate(
self,
models,
sample,
prefix_tokens=None,
bos_token=None,
**kwargs
):
"""Generate a batch of translations.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
"""
model = EnsembleModel(models)
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(model.models_size)
],
)
if not self.retain_dropout:
model.eval()
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample['net_input'].items()
if k != 'prev_output_tokens'
}
src_tokens = encoder_input['src_tokens']
src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
input_size = src_tokens.size()
# batch dimension goes first followed by source lengths
bsz = input_size[0]
src_len = input_size[1]
beam_size = self.beam_size
if self.match_source_len:
max_len = src_lengths_no_eos.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
model.max_decoder_positions() - 1,
)
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
src_lengths = encoder_input['src_lengths']
# initialize buffers
scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos if bos_token is None else bos_token
# reorder source tokens so they may be used as a reference in generating P(S|T)
src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index)
src_tokens = src_tokens.repeat(1, beam_size).view(-1, src_len)
src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1)
attn, attn_buf = None, None
nonpad_idxs = None
# The cands_to_ignore indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then the cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfin_idx):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
fw scores for each hypothesis
combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing
combined noisy channel scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
if self.match_source_len and step > src_lengths_no_eos[unfin_idx]:
score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i][nonpad_idxs[sent]]
_, alignment = hypo_attn.max(dim=0)
else:
hypo_attn = None
alignment = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': alignment,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k):
"""Rescore the top k hypothesis from each beam using noisy channel modeling
Returns:
new_fw_lprobs: the direct model probabilities after pruning the top k
new_ch_lm_lprobs: the combined channel and language model probabilities
new_lm_lprobs: the language model probabilities after pruning the top k
"""
with torch.no_grad():
lprobs_size = lprobs.size()
if prefix_tokens is not None and step < prefix_tokens.size(1):
probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :]
cand_scores = torch.gather(
probs_slice, dim=1,
index=prefix_tokens[:, step].view(-1, 1).data
).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1)
cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1)
# need to calculate and save fw and lm probs for prefix tokens
fw_top_k = cand_scores
fw_top_k_idx = cand_indices
k = 1
else:
# take the top k best words for every sentence in batch*beam
fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k)
eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0]
ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0)
src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype)
if self.combine_method != "lm_only":
temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1)
not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index
cur_tgt_size = step+2
# add eos to all candidate sentences except those that already end in eos
eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1)
eos_tokens[eos_idx] = self.tgt_dict.pad_index
if step == 0:
channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1)
else:
# move eos from beginning to end of target sentence
channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1)
ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size))
ch_input_lengths[eos_idx] = cur_tgt_size-1
if self.channel_scoring_type == "unnormalized":
ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths)
ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True)
del ch_encoder_output
ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:])
ch_intermed_scores = ch_intermed_scores.float()
ch_intermed_scores *= not_padding.float()
ch_scores = torch.sum(ch_intermed_scores, dim=1)
elif self.channel_scoring_type == "k2_separate":
for k_idx in range(k):
k_eos_tokens = eos_tokens[k_idx::k, :]
if step == 0:
k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1)
else:
# move eos from beginning to end of target sentence
k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1)
k_ch_input_lengths = ch_input_lengths[k_idx::k]
k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens)
k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True)
k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2)
k_ch_intermed_scores *= not_padding.float()
ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1)
elif self.channel_scoring_type == "src_vocab":
ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths)
ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True)
del ch_encoder_output
ch_lprobs = normalized_scores_with_batch_vocab(
channel_model.decoder,
ch_decoder_output, src_tokens, k, bsz, beam_size,
self.src_dict.pad_index, top_k=self.top_k_vocab)
ch_scores = torch.sum(ch_lprobs, dim=1)
elif self.channel_scoring_type == "src_vocab_batched":
ch_bsz_size = temp_src_tokens_full.shape[0]
ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz))
for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)):
end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size)
temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :]
channel_input_batch = channel_input[start_idx:end_idx, :]
ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx]
ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch)
ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, encoder_out=ch_encoder_output_batch, features_only=True)
ch_lprobs_list[i] = normalized_scores_with_batch_vocab(
channel_model.decoder,
ch_decoder_output_batch, src_tokens, k, bsz, beam_size,
self.src_dict.pad_index, top_k=self.top_k_vocab,
start_idx=start_idx, end_idx=end_idx)
ch_lprobs = torch.cat(ch_lprobs_list, dim=0)
ch_scores = torch.sum(ch_lprobs, dim=1)
else:
ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full)
ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True)
ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1)
ch_intermed_scores *= not_padding.float()
ch_scores = torch.sum(ch_intermed_scores, dim=1)
else:
cur_tgt_size = 0
ch_scores = ch_scores.view(bsz*beam_size, k)
expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten()
if self.share_tgt_dict:
lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k)
else:
new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm)
new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm)
lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k)
lm_scores.add_(expanded_lm_prefix_scores)
ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size)
# initialize all as min value
new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1)
new_fw_lprobs[:, self.pad] = -math.inf
new_ch_lm_lprobs[:, self.pad] = -math.inf
new_lm_lprobs[:, self.pad] = -math.inf
new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k)
new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores)
new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k))
return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs
def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size):
if self.channel_scoring_type == "unnormalized":
ch_scores = self.log_softmax_fn(
ch_scores.view(-1, self.beam_size * self.k2)
).view(ch_scores.shape)
ch_scores = ch_scores * self.ch_weight
lm_scores1 = lm_scores1 * self.lm_weight
if combine_type == "lm_only":
# log P(T|S) + log P(T)
ch_scores = lm_scores1.view(ch_scores.size())
elif combine_type == "noisy_channel":
# 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T)
if self.normalize_lm_scores_by_tgt_len:
ch_scores.div_(src_size)
lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size)
ch_scores.add_(lm_scores_norm)
# 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T)
else:
ch_scores.add_(lm_scores1.view(ch_scores.size()))
ch_scores.div_(src_size)
return ch_scores
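        # e.g. with combine_type == "noisy_channel", ch_weight = lm_weight = 1 and
        # normalize_lm_scores_by_tgt_len unset, the value returned above is
        # (log P(S|T) + log P(T)) / src_size; combine_fw_bw later adds the
        # 1/t log P(T|S) direct-model term.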
if self.channel_models is not None:
channel_model = self.channel_models[0] # assume only one channel_model model
else:
channel_model = None
lm = EnsembleModel(self.lm_models)
lm_incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(lm.models_size)
],
)
reorder_state = None
batch_idxs = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
lm.reorder_incremental_state(lm_incremental_states, reorder_state)
fw_lprobs, avg_attn_scores = model.forward_decoder(
tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature,
)
fw_lprobs[:, self.pad] = -math.inf # never select pad
fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2)
# handle min and max length constraints
if step >= max_len:
fw_lprobs[:, :self.eos] = -math.inf
fw_lprobs[:, self.eos + 1:] = -math.inf
elif step < self.min_len:
fw_lprobs[:, self.eos] = -math.inf
# handle prefix tokens (possibly with different lengths)
if prefix_tokens is not None and step < prefix_tokens.size(1):
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_mask = prefix_toks.ne(self.pad)
prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
fw_lprobs[prefix_mask] = -math.inf
fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs
)
prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
ch_lm_lprobs[prefix_mask] = -math.inf
ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs
)
prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1))
lm_lprobs[prefix_mask] = -math.inf
lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim)
ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim)
lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim)
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
nonpad_idxs = src_tokens.ne(self.pad)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(fw_lprobs)
scores_buf = scores_buf.type_as(fw_lprobs)
self.search.set_src_lengths(src_lengths_no_eos)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
else:
banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step(
step,
fw_lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size),
lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos (except for candidates to be ignored)
eos_mask = cand_indices.eq(self.eos)
eos_mask[:, :beam_size] &= ~cands_to_ignore
# only consider eos when it's among the top beam_size indices
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size]
)
combined_noisy_channel_eos_scores = torch.masked_select(
combined_noisy_channel_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
)
# finalize hypo using channel model score
finalized_sents = finalize_hypos(
step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = torch.nonzero(batch_mask).squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs]
fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths_no_eos = src_lengths_no_eos[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze()
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# ignored hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
eos_mask[:, :beam_size] |= cands_to_ignore
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_cands_to_ignore, active_hypos)
)
# update cands_to_ignore to ignore any finalized hypos
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
assert (~cands_to_ignore).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
fw_lprobs_top_k, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
fw_lprobs_top_k, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
torch.gather(
lm_lprobs_top_k, dim=1, index=active_hypos,
out=lm_prefix_scores.view(bsz, beam_size)
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized
def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k):
with torch.no_grad():
lm_lprobs, avg_attn_scores = model.forward_decoder(
input_tokens, encoder_outs=None, incremental_states=incremental_states,
)
lm_lprobs_size = lm_lprobs.size(0)
probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1)
return probs_next_wrd
def make_dict2dict(old_dict, new_dict):
dict2dict_map = {}
for sym in old_dict.symbols:
dict2dict_map[old_dict.index(sym)] = new_dict.index(sym)
return dict2dict_map
def dict2dict(tokens, dict2dict_map):
if tokens.device == torch.device('cpu'):
tokens_tmp = tokens
else:
tokens_tmp = tokens.cpu()
return tokens_tmp.map_(
tokens_tmp,
lambda _, val, dict2dict_map=dict2dict_map : dict2dict_map[float(val)]
).to(tokens.device)
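# e.g. translating generated target tokens into the LM's vocabulary
# (the mapping is built once in NoisyChannelSequenceGenerator.__init__):
#   tgt_to_lm = make_dict2dict(tgt_dict, lm_dict)
#   lm_tokens = dict2dict(tgt_tokens, tgt_to_lm)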
def reorder_tokens(tokens, lengths, eos):
# reorder source tokens so they may be used as reference for P(S|T)
return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0)
def reorder_all_tokens(tokens, lengths, eos):
# used to reorder src tokens from [<pad> <w1> <w2> .. <eos>] to [<eos> <w1> <w2>...<pad>]
# so source tokens can be used to predict P(S|T)
return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)])
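# Worked example (illustrative token IDs, pad=1, eos=2): a left-padded source
# [1, 1, 5, 6, 2] with length 3 (two words + eos) becomes [2, 5, 6, 1, 1]:
#   reorder_tokens(torch.tensor([1, 1, 5, 6, 2]), 3, 2)  # -> tensor([2, 5, 6, 1, 1])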
def normalized_scores_with_batch_vocab(
model_decoder, features, target_ids, k, bsz, beam_size,
pad_idx, top_k=0, vocab_size_meter=None, start_idx=None,
end_idx=None, **kwargs):
"""
Get normalized probabilities (or log probs) from a net's output
w.r.t. vocab consisting of target IDs in the batch
"""
if model_decoder.adaptive_softmax is None:
weight = model_decoder.output_projection.weight
vocab_ids = torch.unique(
torch.cat(
(torch.unique(target_ids), torch.arange(top_k, device=target_ids.device))
)
)
id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids))))
mapped_target_ids = target_ids.cpu().apply_(
lambda x, id_map=id_map: id_map[x]
).to(target_ids.device)
expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1)
if start_idx is not None and end_idx is not None:
expanded_target_ids = expanded_target_ids[start_idx:end_idx, :]
logits = F.linear(features, weight[vocab_ids, :])
log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32)
intermed_scores = torch.gather(
log_softmax[:, :-1, :],
2,
expanded_target_ids[:, 1:].unsqueeze(2),
).squeeze()
not_padding = expanded_target_ids[:, 1:] != pad_idx
intermed_scores *= not_padding.float()
return intermed_scores
else:
raise ValueError("adaptive softmax doesn't work with " +
"`normalized_scores_with_batch_vocab()`")
| 41,200 | 47.874259 | 187 |
py
|
sign-topic
|
sign-topic-main/examples/fast_noisy_channel/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import noisy_channel_translation # noqa
from . import noisy_channel_sequence_generator # noqa
from . import noisy_channel_beam_search # noqa
| 329 | 35.666667 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/noisychannel/rerank_score_bw.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from contextlib import redirect_stdout
from fairseq import options
from fairseq_cli import generate
from examples.noisychannel import rerank_options, rerank_utils
def score_bw(args):
if args.backwards1:
scorer1_src = args.target_lang
scorer1_tgt = args.source_lang
else:
scorer1_src = args.source_lang
scorer1_tgt = args.target_lang
if args.score_model2 is not None:
if args.backwards2:
scorer2_src = args.target_lang
scorer2_tgt = args.source_lang
else:
scorer2_src = args.source_lang
scorer2_tgt = args.target_lang
rerank1_is_gen = (
args.gen_model == args.score_model1 and args.source_prefix_frac is None
)
rerank2_is_gen = (
args.gen_model == args.score_model2 and args.source_prefix_frac is None
)
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
args.shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
score1_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1,
)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2,
)
if args.right_to_left1:
rerank_data1 = right_to_left_preprocessed_dir
elif args.backwards1:
rerank_data1 = backwards_preprocessed_dir
else:
rerank_data1 = left_to_right_preprocessed_dir
gen_param = ["--batch-size", str(128), "--score-reference", "--gen-subset", "train"]
if not rerank1_is_gen and not os.path.isfile(score1_file):
print("STEP 4: score the translations for model 1")
model_param1 = [
"--path",
args.score_model1,
"--source-lang",
scorer1_src,
"--target-lang",
scorer1_tgt,
]
gen_model1_param = [rerank_data1] + gen_param + model_param1
gen_parser = options.get_generation_parser()
input_args = options.parse_args_and_arch(gen_parser, gen_model1_param)
with open(score1_file, "w") as f:
with redirect_stdout(f):
generate.main(input_args)
if (
args.score_model2 is not None
and not os.path.isfile(score2_file)
and not rerank2_is_gen
):
print("STEP 4: score the translations for model 2")
if args.right_to_left2:
rerank_data2 = right_to_left_preprocessed_dir
elif args.backwards2:
rerank_data2 = backwards_preprocessed_dir
else:
rerank_data2 = left_to_right_preprocessed_dir
model_param2 = [
"--path",
args.score_model2,
"--source-lang",
scorer2_src,
"--target-lang",
scorer2_tgt,
]
gen_model2_param = [rerank_data2] + gen_param + model_param2
gen_parser = options.get_generation_parser()
input_args = options.parse_args_and_arch(gen_parser, gen_model2_param)
with open(score2_file, "w") as f:
with redirect_stdout(f):
generate.main(input_args)
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
score_bw(args)
if __name__ == "__main__":
cli_main()
| 4,212 | 28.256944 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/noisychannel/rerank_generate.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Generate n-best translations using a trained model.
"""
import os
import subprocess
from contextlib import redirect_stdout
from fairseq import options
from fairseq_cli import generate, preprocess
from examples.noisychannel import rerank_options, rerank_utils
def gen_and_reprocess_nbest(args):
if args.score_dict_dir is None:
args.score_dict_dir = args.data
if args.prefix_len is not None:
assert (
args.right_to_left1 is False
), "prefix length not compatible with right to left models"
assert (
args.right_to_left2 is False
), "prefix length not compatible with right to left models"
if args.nbest_list is not None:
assert args.score_model2 is None
if args.backwards1:
scorer1_src = args.target_lang
scorer1_tgt = args.source_lang
else:
scorer1_src = args.source_lang
scorer1_tgt = args.target_lang
    store_data = os.path.join(
        os.path.dirname(__file__), "rerank_data", args.data_dir_name
    )
if not os.path.exists(store_data):
os.makedirs(store_data)
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
args.shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
assert not (
args.right_to_left1 and args.backwards1
), "backwards right to left not supported"
assert not (
args.right_to_left2 and args.backwards2
), "backwards right to left not supported"
assert not (
args.prefix_len is not None and args.target_prefix_frac is not None
), "target prefix frac and target prefix len incompatible"
# make directory to store generation results
if not os.path.exists(pre_gen):
os.makedirs(pre_gen)
rerank1_is_gen = (
args.gen_model == args.score_model1 and args.source_prefix_frac is None
)
rerank2_is_gen = (
args.gen_model == args.score_model2 and args.source_prefix_frac is None
)
if args.nbest_list is not None:
rerank2_is_gen = True
# make directories to store preprossed nbest list for reranking
if not os.path.exists(left_to_right_preprocessed_dir):
os.makedirs(left_to_right_preprocessed_dir)
if not os.path.exists(right_to_left_preprocessed_dir):
os.makedirs(right_to_left_preprocessed_dir)
if not os.path.exists(lm_preprocessed_dir):
os.makedirs(lm_preprocessed_dir)
if not os.path.exists(backwards_preprocessed_dir):
os.makedirs(backwards_preprocessed_dir)
score1_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1,
)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2,
)
predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"
using_nbest = args.nbest_list is not None
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
else:
if not os.path.isfile(predictions_bpe_file):
print("STEP 1: generate predictions using the p(T|S) model with bpe")
print(args.data)
param1 = [
args.data,
"--path",
args.gen_model,
"--shard-id",
str(args.shard_id),
"--num-shards",
str(args.num_shards),
"--nbest",
str(args.num_rescore),
"--batch-size",
str(args.batch_size),
"--beam",
str(args.num_rescore),
"--batch-size",
str(args.num_rescore),
"--gen-subset",
args.gen_subset,
"--source-lang",
args.source_lang,
"--target-lang",
args.target_lang,
]
if args.sampling:
param1 += ["--sampling"]
gen_parser = options.get_generation_parser()
input_args = options.parse_args_and_arch(gen_parser, param1)
print(input_args)
with open(predictions_bpe_file, "w") as f:
with redirect_stdout(f):
generate.main(input_args)
gen_output = rerank_utils.BitextOutputFromGen(
predictions_bpe_file,
bpe_symbol=args.post_process,
nbest=using_nbest,
prefix_len=args.prefix_len,
target_prefix_frac=args.target_prefix_frac,
)
if args.diff_bpe:
rerank_utils.write_reprocessed(
gen_output.no_bpe_source,
gen_output.no_bpe_hypo,
gen_output.no_bpe_target,
pre_gen + "/source_gen_bpe." + args.source_lang,
pre_gen + "/target_gen_bpe." + args.target_lang,
pre_gen + "/reference_gen_bpe." + args.target_lang,
)
bitext_bpe = args.rescore_bpe_code
bpe_src_param = [
"-c",
bitext_bpe,
"--input",
pre_gen + "/source_gen_bpe." + args.source_lang,
"--output",
pre_gen + "/rescore_data." + args.source_lang,
]
bpe_tgt_param = [
"-c",
bitext_bpe,
"--input",
pre_gen + "/target_gen_bpe." + args.target_lang,
"--output",
pre_gen + "/rescore_data." + args.target_lang,
]
subprocess.call(
[
"python",
os.path.join(
os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py"
),
]
+ bpe_src_param,
shell=False,
)
subprocess.call(
[
"python",
os.path.join(
os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py"
),
]
+ bpe_tgt_param,
shell=False,
)
if (not os.path.isfile(score1_file) and not rerank1_is_gen) or (
args.score_model2 is not None
and not os.path.isfile(score2_file)
and not rerank2_is_gen
):
print(
"STEP 2: process the output of generate.py so we have clean text files with the translations"
)
rescore_file = "/rescore_data"
if args.prefix_len is not None:
prefix_len_rescore_file = rescore_file + "prefix" + str(args.prefix_len)
if args.target_prefix_frac is not None:
target_prefix_frac_rescore_file = (
rescore_file + "target_prefix_frac" + str(args.target_prefix_frac)
)
if args.source_prefix_frac is not None:
source_prefix_frac_rescore_file = (
rescore_file + "source_prefix_frac" + str(args.source_prefix_frac)
)
if not args.right_to_left1 or not args.right_to_left2:
if not args.diff_bpe:
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen + rescore_file + "." + args.source_lang,
pre_gen + rescore_file + "." + args.target_lang,
pre_gen + "/reference_file",
bpe_symbol=args.post_process,
)
if args.prefix_len is not None:
bw_rescore_file = prefix_len_rescore_file
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen + prefix_len_rescore_file + "." + args.source_lang,
pre_gen + prefix_len_rescore_file + "." + args.target_lang,
pre_gen + "/reference_file",
prefix_len=args.prefix_len,
bpe_symbol=args.post_process,
)
elif args.target_prefix_frac is not None:
bw_rescore_file = target_prefix_frac_rescore_file
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen
+ target_prefix_frac_rescore_file
+ "."
+ args.source_lang,
pre_gen
+ target_prefix_frac_rescore_file
+ "."
+ args.target_lang,
pre_gen + "/reference_file",
bpe_symbol=args.post_process,
target_prefix_frac=args.target_prefix_frac,
)
else:
bw_rescore_file = rescore_file
if args.source_prefix_frac is not None:
fw_rescore_file = source_prefix_frac_rescore_file
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen
+ source_prefix_frac_rescore_file
+ "."
+ args.source_lang,
pre_gen
+ source_prefix_frac_rescore_file
+ "."
+ args.target_lang,
pre_gen + "/reference_file",
bpe_symbol=args.post_process,
source_prefix_frac=args.source_prefix_frac,
)
else:
fw_rescore_file = rescore_file
if args.right_to_left1 or args.right_to_left2:
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen + "/right_to_left_rescore_data." + args.source_lang,
pre_gen + "/right_to_left_rescore_data." + args.target_lang,
pre_gen + "/right_to_left_reference_file",
right_to_left=True,
bpe_symbol=args.post_process,
)
print("STEP 3: binarize the translations")
    if (
        (not args.right_to_left1)
        or (args.score_model2 is not None and not args.right_to_left2)
        or (not rerank1_is_gen)
    ):
if args.backwards1 or args.backwards2:
if args.backwards_score_dict_dir is not None:
bw_dict = args.backwards_score_dict_dir
else:
bw_dict = args.score_dict_dir
bw_preprocess_param = [
"--source-lang",
scorer1_src,
"--target-lang",
scorer1_tgt,
"--trainpref",
pre_gen + bw_rescore_file,
"--srcdict",
bw_dict + "/dict." + scorer1_src + ".txt",
"--tgtdict",
bw_dict + "/dict." + scorer1_tgt + ".txt",
"--destdir",
backwards_preprocessed_dir,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(bw_preprocess_param)
preprocess.main(input_args)
preprocess_param = [
"--source-lang",
scorer1_src,
"--target-lang",
scorer1_tgt,
"--trainpref",
pre_gen + fw_rescore_file,
"--srcdict",
args.score_dict_dir + "/dict." + scorer1_src + ".txt",
"--tgtdict",
args.score_dict_dir + "/dict." + scorer1_tgt + ".txt",
"--destdir",
left_to_right_preprocessed_dir,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_param)
preprocess.main(input_args)
if args.right_to_left1 or args.right_to_left2:
preprocess_param = [
"--source-lang",
scorer1_src,
"--target-lang",
scorer1_tgt,
"--trainpref",
pre_gen + "/right_to_left_rescore_data",
"--srcdict",
args.score_dict_dir + "/dict." + scorer1_src + ".txt",
"--tgtdict",
args.score_dict_dir + "/dict." + scorer1_tgt + ".txt",
"--destdir",
right_to_left_preprocessed_dir,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_param)
preprocess.main(input_args)
return gen_output
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
gen_and_reprocess_nbest(args)
if __name__ == "__main__":
cli_main()
| 14,157 | 34.572864 | 105 |
py
|
sign-topic
|
sign-topic-main/examples/noisychannel/rerank_score_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from fairseq import options
from examples.noisychannel import rerank_options, rerank_utils
def score_lm(args):
using_nbest = args.nbest_list is not None
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
args.shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
gen_output = rerank_utils.BitextOutputFromGen(
predictions_bpe_file, bpe_symbol=args.post_process, nbest=using_nbest
)
if args.language_model is not None:
lm_score_file = rerank_utils.rescore_file_name(
pre_gen, args.prefix_len, args.lm_name, lm_file=True
)
if args.language_model is not None and not os.path.isfile(lm_score_file):
print("STEP 4.5: language modeling for P(T)")
if args.lm_bpe_code is None:
bpe_status = "no bpe"
elif args.lm_bpe_code == "shared":
bpe_status = "shared"
else:
bpe_status = "different"
rerank_utils.lm_scoring(
lm_preprocessed_dir,
bpe_status,
gen_output,
pre_gen,
args.lm_dict,
args.lm_name,
args.language_model,
args.lm_bpe_code,
128,
lm_score_file,
args.target_lang,
args.source_lang,
prefix_len=args.prefix_len,
)
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
score_lm(args)
if __name__ == "__main__":
cli_main()
| 2,253 | 26.487805 | 77 |
py
|
sign-topic
|
sign-topic-main/examples/noisychannel/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .rerank_options import * # noqa
| 216 | 30 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/noisychannel/rerank.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from multiprocessing import Pool
import numpy as np
from fairseq import options
from fairseq.data import dictionary
from fairseq.scoring import bleu
from examples.noisychannel import (
rerank_generate,
rerank_options,
rerank_score_bw,
rerank_score_lm,
rerank_utils,
)
def score_target_hypo(
args, a, b, c, lenpen, target_outfile, hypo_outfile, write_hypos, normalize
):
print("lenpen", lenpen, "weight1", a, "weight2", b, "weight3", c)
gen_output_lst, bitext1_lst, bitext2_lst, lm_res_lst = load_score_files(args)
dict = dictionary.Dictionary()
    scorer = bleu.Scorer(
bleu.BleuConfig(
pad=dict.pad(),
eos=dict.eos(),
unk=dict.unk(),
)
)
ordered_hypos = {}
ordered_targets = {}
for shard_id in range(len(bitext1_lst)):
bitext1 = bitext1_lst[shard_id]
bitext2 = bitext2_lst[shard_id]
gen_output = gen_output_lst[shard_id]
lm_res = lm_res_lst[shard_id]
total = len(bitext1.rescore_source.keys())
source_lst = []
hypo_lst = []
score_lst = []
reference_lst = []
j = 1
best_score = -math.inf
for i in range(total):
# length is measured in terms of words, not bpe tokens, since models may not share the same bpe
target_len = len(bitext1.rescore_hypo[i].split())
if lm_res is not None:
lm_score = lm_res.score[i]
else:
lm_score = 0
if bitext2 is not None:
bitext2_score = bitext2.rescore_score[i]
bitext2_backwards = bitext2.backwards
else:
bitext2_score = None
bitext2_backwards = None
score = rerank_utils.get_score(
a,
b,
c,
target_len,
bitext1.rescore_score[i],
bitext2_score,
lm_score=lm_score,
lenpen=lenpen,
src_len=bitext1.source_lengths[i],
tgt_len=bitext1.target_lengths[i],
bitext1_backwards=bitext1.backwards,
bitext2_backwards=bitext2_backwards,
normalize=normalize,
)
if score > best_score:
best_score = score
best_hypo = bitext1.rescore_hypo[i]
if j == gen_output.num_hypos[i] or j == args.num_rescore:
j = 1
hypo_lst.append(best_hypo)
score_lst.append(best_score)
source_lst.append(bitext1.rescore_source[i])
reference_lst.append(bitext1.rescore_target[i])
best_score = -math.inf
best_hypo = ""
else:
j += 1
gen_keys = list(sorted(gen_output.no_bpe_target.keys()))
for key in range(len(gen_keys)):
if args.prefix_len is None:
assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], (
"pred and rescore hypo mismatch: i: "
+ str(key)
+ ", "
+ str(hypo_lst[key])
+ str(gen_keys[key])
+ str(gen_output.no_bpe_hypo[key])
)
sys_tok = dict.encode_line(hypo_lst[key])
ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
scorer.add(ref_tok, sys_tok)
else:
full_hypo = rerank_utils.get_full_from_prefix(
hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]]
)
sys_tok = dict.encode_line(full_hypo)
ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
scorer.add(ref_tok, sys_tok)
# if only one set of hyper parameters is provided, write the predictions to a file
if write_hypos:
            # recover the original ids from nbest list generation
for key in range(len(gen_output.no_bpe_target)):
if args.prefix_len is None:
assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], (
"pred and rescore hypo mismatch:"
+ "i:"
+ str(key)
+ str(hypo_lst[key])
+ str(gen_output.no_bpe_hypo[key])
)
ordered_hypos[gen_keys[key]] = hypo_lst[key]
ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[
gen_keys[key]
]
else:
full_hypo = rerank_utils.get_full_from_prefix(
hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]]
)
ordered_hypos[gen_keys[key]] = full_hypo
ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[
gen_keys[key]
]
# write the hypos in the original order from nbest list generation
if args.num_shards == (len(bitext1_lst)):
with open(target_outfile, "w") as t:
with open(hypo_outfile, "w") as h:
for key in range(len(ordered_hypos)):
t.write(ordered_targets[key])
h.write(ordered_hypos[key])
res = scorer.result_string(4)
if write_hypos:
print(res)
score = rerank_utils.parse_bleu_scoring(res)
return score
def match_target_hypo(args, target_outfile, hypo_outfile):
"""combine scores from the LM and bitext models, and write the top scoring hypothesis to a file"""
if len(args.weight1) == 1:
res = score_target_hypo(
args,
args.weight1[0],
args.weight2[0],
args.weight3[0],
args.lenpen[0],
target_outfile,
hypo_outfile,
True,
args.normalize,
)
rerank_scores = [res]
else:
print("launching pool")
with Pool(32) as p:
rerank_scores = p.starmap(
score_target_hypo,
[
(
args,
args.weight1[i],
args.weight2[i],
args.weight3[i],
args.lenpen[i],
target_outfile,
hypo_outfile,
False,
args.normalize,
)
for i in range(len(args.weight1))
],
)
if len(rerank_scores) > 1:
best_index = np.argmax(rerank_scores)
best_score = rerank_scores[best_index]
print("best score", best_score)
print("best lenpen", args.lenpen[best_index])
print("best weight1", args.weight1[best_index])
print("best weight2", args.weight2[best_index])
print("best weight3", args.weight3[best_index])
return (
args.lenpen[best_index],
args.weight1[best_index],
args.weight2[best_index],
args.weight3[best_index],
best_score,
)
else:
return (
args.lenpen[0],
args.weight1[0],
args.weight2[0],
args.weight3[0],
rerank_scores[0],
)
def load_score_files(args):
if args.all_shards:
shard_ids = list(range(args.num_shards))
else:
shard_ids = [args.shard_id]
gen_output_lst = []
bitext1_lst = []
bitext2_lst = []
lm_res1_lst = []
for shard_id in shard_ids:
using_nbest = args.nbest_list is not None
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
rerank1_is_gen = (
args.gen_model == args.score_model1 and args.source_prefix_frac is None
)
rerank2_is_gen = (
args.gen_model == args.score_model2 and args.source_prefix_frac is None
)
score1_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1,
)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2,
)
if args.language_model is not None:
lm_score_file = rerank_utils.rescore_file_name(
pre_gen, args.prefix_len, args.lm_name, lm_file=True
)
# get gen output
predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
gen_output = rerank_utils.BitextOutputFromGen(
predictions_bpe_file,
bpe_symbol=args.post_process,
nbest=using_nbest,
prefix_len=args.prefix_len,
target_prefix_frac=args.target_prefix_frac,
)
if rerank1_is_gen:
bitext1 = gen_output
else:
bitext1 = rerank_utils.BitextOutput(
score1_file,
args.backwards1,
args.right_to_left1,
args.post_process,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
if args.score_model2 is not None or args.nbest_list is not None:
if rerank2_is_gen:
bitext2 = gen_output
else:
bitext2 = rerank_utils.BitextOutput(
score2_file,
args.backwards2,
args.right_to_left2,
args.post_process,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
assert (
bitext2.source_lengths == bitext1.source_lengths
), "source lengths for rescoring models do not match"
assert (
bitext2.target_lengths == bitext1.target_lengths
), "target lengths for rescoring models do not match"
else:
if args.diff_bpe:
assert args.score_model2 is None
bitext2 = gen_output
else:
bitext2 = None
if args.language_model is not None:
lm_res1 = rerank_utils.LMOutput(
lm_score_file,
args.lm_dict,
args.prefix_len,
args.post_process,
args.target_prefix_frac,
)
else:
lm_res1 = None
gen_output_lst.append(gen_output)
bitext1_lst.append(bitext1)
bitext2_lst.append(bitext2)
lm_res1_lst.append(lm_res1)
return gen_output_lst, bitext1_lst, bitext2_lst, lm_res1_lst
def rerank(args):
if type(args.lenpen) is not list:
args.lenpen = [args.lenpen]
if type(args.weight1) is not list:
args.weight1 = [args.weight1]
if type(args.weight2) is not list:
args.weight2 = [args.weight2]
if type(args.weight3) is not list:
args.weight3 = [args.weight3]
if args.all_shards:
shard_ids = list(range(args.num_shards))
else:
shard_ids = [args.shard_id]
for shard_id in shard_ids:
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
rerank_generate.gen_and_reprocess_nbest(args)
rerank_score_bw.score_bw(args)
rerank_score_lm.score_lm(args)
if args.write_hypos is None:
write_targets = pre_gen + "/matched_targets"
write_hypos = pre_gen + "/matched_hypos"
else:
write_targets = args.write_hypos + "_targets" + args.gen_subset
write_hypos = args.write_hypos + "_hypos" + args.gen_subset
if args.all_shards:
write_targets += "_all_shards"
write_hypos += "_all_shards"
(
best_lenpen,
best_weight1,
best_weight2,
best_weight3,
best_score,
) = match_target_hypo(args, write_targets, write_hypos)
return best_lenpen, best_weight1, best_weight2, best_weight3, best_score
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
rerank(args)
if __name__ == "__main__":
cli_main()
| 14,097 | 31.862471 | 107 |
py
|
sign-topic
|
sign-topic-main/examples/noisychannel/rerank_options.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import options
def get_reranking_parser(default_task="translation"):
parser = options.get_parser("Generation and reranking", default_task)
add_reranking_args(parser)
return parser
def get_tuning_parser(default_task="translation"):
parser = options.get_parser("Reranking tuning", default_task)
add_reranking_args(parser)
add_tuning_args(parser)
return parser
def add_reranking_args(parser):
group = parser.add_argument_group("Reranking")
# fmt: off
group.add_argument('--score-model1', '-s1', type=str, metavar='FILE', required=True,
help='path to first model or ensemble of models for rescoring')
group.add_argument('--score-model2', '-s2', type=str, metavar='FILE', required=False,
help='path to second model or ensemble of models for rescoring')
group.add_argument('--num-rescore', '-n', type=int, metavar='N', default=10,
help='the number of candidate hypothesis to rescore')
group.add_argument('-bz', '--batch-size', type=int, metavar='N', default=128,
help='batch size for generating the nbest list')
group.add_argument('--gen-subset', default='test', metavar='SET', choices=['test', 'train', 'valid'],
help='data subset to generate (train, valid, test)')
group.add_argument('--gen-model', default=None, metavar='FILE',
help='the model to generate translations')
group.add_argument('-b1', '--backwards1', action='store_true',
help='whether or not the first model group is backwards')
group.add_argument('-b2', '--backwards2', action='store_true',
help='whether or not the second model group is backwards')
group.add_argument('-a', '--weight1', default=1, nargs='+', type=float,
help='the weight(s) of the first model')
group.add_argument('-b', '--weight2', default=1, nargs='+', type=float,
help='the weight(s) of the second model, or the gen model if using nbest from interactive.py')
group.add_argument('-c', '--weight3', default=1, nargs='+', type=float,
help='the weight(s) of the third model')
# lm arguments
group.add_argument('-lm', '--language-model', default=None, metavar='FILE',
help='language model for target language to rescore translations')
group.add_argument('--lm-dict', default=None, metavar='FILE',
help='the dict of the language model for the target language')
group.add_argument('--lm-name', default=None,
help='the name of the language model for the target language')
group.add_argument('--lm-bpe-code', default=None, metavar='FILE',
help='the bpe code for the language model for the target language')
group.add_argument('--data-dir-name', default=None,
help='name of data directory')
group.add_argument('--lenpen', default=1, nargs='+', type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--score-dict-dir', default=None,
help='the directory with dictionaries for the scoring models')
group.add_argument('--right-to-left1', action='store_true',
help='whether the first model group is a right to left model')
group.add_argument('--right-to-left2', action='store_true',
help='whether the second model group is a right to left model')
group.add_argument('--post-process', '--remove-bpe', default='@@ ',
help='the bpe symbol, used for the bitext and LM')
group.add_argument('--prefix-len', default=None, type=int,
help='the length of the target prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--sampling', action='store_true',
help='use sampling instead of beam search for generating n best list')
group.add_argument('--diff-bpe', action='store_true',
help='bpe for rescoring and nbest list not the same')
group.add_argument('--rescore-bpe-code', default=None,
help='bpe code for rescoring models')
group.add_argument('--nbest-list', default=None,
help='use predefined nbest list in interactive.py format')
group.add_argument('--write-hypos', default=None,
help='filename prefix to write hypos to')
group.add_argument('--ref-translation', default=None,
help='reference translation to use with nbest list from interactive.py')
group.add_argument('--backwards-score-dict-dir', default=None,
help='the directory with dictionaries for the backwards model,'
'if None then it is assumed the fw and backwards models share dictionaries')
# extra scaling args
group.add_argument('--gen-model-name', default=None,
help='the name of the models that generated the nbest list')
group.add_argument('--model1-name', default=None,
help='the name of the set for model1 group ')
group.add_argument('--model2-name', default=None,
help='the name of the set for model2 group')
group.add_argument('--shard-id', default=0, type=int,
help='the id of the shard to generate')
group.add_argument('--num-shards', default=1, type=int,
help='the number of shards to generate across')
group.add_argument('--all-shards', action='store_true',
help='use all shards')
group.add_argument('--target-prefix-frac', default=None, type=float,
help='the fraction of the target prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--source-prefix-frac', default=None, type=float,
help='the fraction of the source prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--normalize', action='store_true',
help='whether to normalize by src and target len')
# fmt: on
return group
def add_tuning_args(parser):
group = parser.add_argument_group("Tuning")
group.add_argument(
"--lower-bound",
default=[-0.7],
nargs="+",
type=float,
help="lower bound of search space",
)
group.add_argument(
"--upper-bound",
default=[3],
nargs="+",
type=float,
help="upper bound of search space",
)
group.add_argument(
"--tune-param",
default=["lenpen"],
nargs="+",
choices=["lenpen", "weight1", "weight2", "weight3"],
help="the parameter(s) to tune",
)
group.add_argument(
"--tune-subset",
default="valid",
choices=["valid", "test", "train"],
help="the subset to tune on ",
)
group.add_argument(
"--num-trials",
default=1000,
type=int,
help="number of trials to do for random search",
)
group.add_argument(
"--share-weights", action="store_true", help="share weight2 and weight 3"
)
return group
| 7,537 | 49.253333 | 117 |
py
|
sign-topic
|
sign-topic-main/examples/noisychannel/rerank_tune.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import random
import numpy as np
from fairseq import options
from examples.noisychannel import rerank, rerank_options
def random_search(args):
param_values = []
tuneable_parameters = ["lenpen", "weight1", "weight2", "weight3"]
initial_params = [args.lenpen, args.weight1, args.weight2, args.weight3]
for i, elem in enumerate(initial_params):
if type(elem) is not list:
initial_params[i] = [elem]
else:
initial_params[i] = elem
tune_parameters = args.tune_param.copy()
for i in range(len(args.tune_param)):
assert args.upper_bound[i] >= args.lower_bound[i]
index = tuneable_parameters.index(args.tune_param[i])
del tuneable_parameters[index]
del initial_params[index]
tune_parameters += tuneable_parameters
param_values += initial_params
random.seed(args.seed)
random_params = np.array(
[
[
random.uniform(args.lower_bound[i], args.upper_bound[i])
for i in range(len(args.tune_param))
]
for k in range(args.num_trials)
]
)
set_params = np.array(
[
[initial_params[i][0] for i in range(len(tuneable_parameters))]
for k in range(args.num_trials)
]
)
random_params = np.concatenate((random_params, set_params), 1)
rerank_args = vars(args).copy()
if args.nbest_list:
rerank_args["gen_subset"] = "test"
else:
rerank_args["gen_subset"] = args.tune_subset
for k in range(len(tune_parameters)):
rerank_args[tune_parameters[k]] = list(random_params[:, k])
if args.share_weights:
k = tune_parameters.index("weight2")
rerank_args["weight3"] = list(random_params[:, k])
rerank_args = argparse.Namespace(**rerank_args)
best_lenpen, best_weight1, best_weight2, best_weight3, best_score = rerank.rerank(
rerank_args
)
rerank_args = vars(args).copy()
rerank_args["lenpen"] = [best_lenpen]
rerank_args["weight1"] = [best_weight1]
rerank_args["weight2"] = [best_weight2]
rerank_args["weight3"] = [best_weight3]
# write the hypothesis from the valid set from the best trial
if args.gen_subset != "valid":
rerank_args["gen_subset"] = "valid"
rerank_args = argparse.Namespace(**rerank_args)
rerank.rerank(rerank_args)
# test with the best hyperparameters on gen subset
rerank_args = vars(args).copy()
rerank_args["gen_subset"] = args.gen_subset
rerank_args["lenpen"] = [best_lenpen]
rerank_args["weight1"] = [best_weight1]
rerank_args["weight2"] = [best_weight2]
rerank_args["weight3"] = [best_weight3]
rerank_args = argparse.Namespace(**rerank_args)
rerank.rerank(rerank_args)
def cli_main():
parser = rerank_options.get_tuning_parser()
args = options.parse_args_and_arch(parser)
random_search(args)
if __name__ == "__main__":
cli_main()
| 3,166 | 29.747573 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/noisychannel/rerank_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import re
import subprocess
from contextlib import redirect_stdout
from fairseq import options
from fairseq_cli import eval_lm, preprocess
def reprocess(fle):
    # takes in a file of generate.py translation output
    # returns a source dict and a hypothesis dict, where keys are the ID num
    # (as an int) and values are the corresponding source and translation.
    # There may be several translations per source, so the values of
    # hypothesis_dict are lists.
with open(fle, "r") as f:
txt = f.read()
"""reprocess generate.py output"""
p = re.compile(r"[STHP][-]\d+\s*")
hp = re.compile(r"(\s*[-]?\d+[.]?\d+\s*)|(\s*(-inf)\s*)")
source_dict = {}
hypothesis_dict = {}
score_dict = {}
target_dict = {}
pos_score_dict = {}
lines = txt.split("\n")
for line in lines:
line += "\n"
prefix = re.search(p, line)
if prefix is not None:
assert len(prefix.group()) > 2, "prefix id not found"
_, j = prefix.span()
id_num = prefix.group()[2:]
id_num = int(id_num)
line_type = prefix.group()[0]
if line_type == "H":
h_txt = line[j:]
hypo = re.search(hp, h_txt)
assert (
hypo is not None
), "regular expression failed to find the hypothesis scoring"
_, i = hypo.span()
score = hypo.group()
if id_num in hypothesis_dict:
hypothesis_dict[id_num].append(h_txt[i:])
score_dict[id_num].append(float(score))
else:
hypothesis_dict[id_num] = [h_txt[i:]]
score_dict[id_num] = [float(score)]
elif line_type == "S":
source_dict[id_num] = line[j:]
elif line_type == "T":
target_dict[id_num] = line[j:]
elif line_type == "P":
pos_scores = (line[j:]).split()
pos_scores = [float(x) for x in pos_scores]
if id_num in pos_score_dict:
pos_score_dict[id_num].append(pos_scores)
else:
pos_score_dict[id_num] = [pos_scores]
return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
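def _demo_reprocess():
    # Minimal illustrative sketch of the S-/T-/H-/P- line format that the
    # regexes above expect from generate.py output; the sample sentences and
    # scores are made up for the example.
    import tempfile
    lines = [
        "S-0\tein Haus",
        "T-0\ta house",
        "H-0\t-0.25\ta house",
        "P-0\t-0.1 -0.2 -0.45",
    ]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("\n".join(lines))
        tmp_name = f.name
    source, hypo, score, target, pos_score = reprocess(tmp_name)
    os.remove(tmp_name)
    assert source[0] == "ein Haus\n" and hypo[0] == ["a house\n"]
    assert score[0] == [-0.25] and pos_score[0] == [[-0.1, -0.2, -0.45]]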
def reprocess_nbest(fle):
"""reprocess interactive.py output"""
with open(fle, "r") as f:
txt = f.read()
source_dict = {}
hypothesis_dict = {}
score_dict = {}
target_dict = {}
pos_score_dict = {}
lines = txt.split("\n")
hp = re.compile(r"[-]?\d+[.]?\d+")
j = -1
for _i, line in enumerate(lines):
line += "\n"
line_type = line[0]
if line_type == "H":
hypo = re.search(hp, line)
_, start_index = hypo.span()
score = hypo.group()
if j in score_dict:
score_dict[j].append(float(score))
hypothesis_dict[j].append(line[start_index:].strip("\t"))
else:
score_dict[j] = [float(score)]
hypothesis_dict[j] = [line[start_index:].strip("\t")]
elif line_type == "O":
j += 1
source_dict[j] = line[2:]
# we don't have the targets for interactive.py
target_dict[j] = "filler"
elif line_type == "P":
pos_scores = [float(pos_score) for pos_score in line.split()[1:]]
if j in pos_score_dict:
pos_score_dict[j].append(pos_scores)
else:
pos_score_dict[j] = [pos_scores]
assert source_dict.keys() == hypothesis_dict.keys()
assert source_dict.keys() == pos_score_dict.keys()
assert source_dict.keys() == score_dict.keys()
return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
def write_reprocessed(
sources,
hypos,
targets,
source_outfile,
hypo_outfile,
target_outfile,
right_to_left=False,
prefix_len=None,
bpe_symbol=None,
target_prefix_frac=None,
source_prefix_frac=None,
):
"""writes nbest hypothesis for rescoring"""
assert not (
prefix_len is not None and target_prefix_frac is not None
), "in writing reprocessed, only one type of prefix may be used"
assert not (
prefix_len is not None and source_prefix_frac is not None
), "in writing reprocessed, only one type of prefix may be used"
assert not (
target_prefix_frac is not None and source_prefix_frac is not None
), "in writing reprocessed, only one type of prefix may be used"
with open(source_outfile, "w") as source_file, open(
hypo_outfile, "w"
) as hypo_file, open(target_outfile, "w") as target_file:
assert len(sources) == len(hypos), "sources and hypos list length mismatch"
if right_to_left:
for i in range(len(sources)):
for j in range(len(hypos[i])):
if prefix_len is None:
hypo_file.write(make_right_to_left(hypos[i][j]) + "\n")
else:
raise NotImplementedError()
source_file.write(make_right_to_left(sources[i]) + "\n")
target_file.write(make_right_to_left(targets[i]) + "\n")
else:
for i in sorted(sources.keys()):
for j in range(len(hypos[i])):
if prefix_len is not None:
shortened = (
get_prefix_no_bpe(hypos[i][j], bpe_symbol, prefix_len)
+ "\n"
)
hypo_file.write(shortened)
source_file.write(sources[i])
target_file.write(targets[i])
elif target_prefix_frac is not None:
num_words, shortened, num_bpe_tokens = calc_length_from_frac(
hypos[i][j], target_prefix_frac, bpe_symbol
)
shortened += "\n"
hypo_file.write(shortened)
source_file.write(sources[i])
target_file.write(targets[i])
elif source_prefix_frac is not None:
                        num_words, shortened, num_bpe_tokens = calc_length_from_frac(
sources[i], source_prefix_frac, bpe_symbol
)
shortened += "\n"
hypo_file.write(hypos[i][j])
source_file.write(shortened)
target_file.write(targets[i])
else:
hypo_file.write(hypos[i][j])
source_file.write(sources[i])
target_file.write(targets[i])
def calc_length_from_frac(bpe_sentence, prefix_frac, bpe_symbol):
    # return the number of words (not bpe tokens) that we want
no_bpe_sen = remove_bpe(bpe_sentence, bpe_symbol)
len_sen = len(no_bpe_sen.split())
num_words = math.ceil(len_sen * prefix_frac)
prefix = get_prefix_no_bpe(bpe_sentence, bpe_symbol, num_words)
num_bpe_tokens = len(prefix.split())
return num_words, prefix, num_bpe_tokens
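def _demo_calc_length_from_frac():
    # Minimal illustrative sketch, assuming the default "@@ " bpe marker:
    # "re@@ rank the model" is three words once bpe is removed, so a 0.5
    # prefix fraction keeps math.ceil(3 * 0.5) = 2 words spanning 3 bpe tokens.
    num_words, prefix, num_bpe_tokens = calc_length_from_frac(
        "re@@ rank the model", 0.5, "@@ "
    )
    assert (num_words, prefix, num_bpe_tokens) == (2, "re@@ rank the", 3)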
def get_prefix(sentence, prefix_len):
"""assuming no bpe, gets the prefix of the sentence with prefix_len words"""
tokens = sentence.strip("\n").split()
if prefix_len >= len(tokens):
return sentence.strip("\n")
else:
return " ".join(tokens[:prefix_len])
def get_prefix_no_bpe(sentence, bpe_symbol, prefix_len):
if bpe_symbol is None:
return get_prefix(sentence, prefix_len)
else:
return " ".join(get_prefix_from_len(sentence.split(), bpe_symbol, prefix_len))
def get_prefix_from_len(sentence, bpe_symbol, prefix_len):
"""get the prefix of sentence with bpe, with prefix len in terms of words, not bpe tokens"""
bpe_count = sum([bpe_symbol.strip(" ") in t for t in sentence[:prefix_len]])
if bpe_count == 0:
return sentence[:prefix_len]
else:
return sentence[:prefix_len] + get_prefix_from_len(
sentence[prefix_len:], bpe_symbol, bpe_count
)
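def _demo_get_prefix_from_len():
    # Minimal illustrative sketch: every "@@ " continuation token inside the
    # kept span pulls in one more token, so a 1-word prefix of
    # "un@@ believ@@ able story" spans 3 bpe tokens.
    tokens = "un@@ believ@@ able story".split()
    assert get_prefix_from_len(tokens, "@@ ", 1) == ["un@@", "believ@@", "able"]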
def get_num_bpe_tokens_from_len(sentence, bpe_symbol, prefix_len):
"""given a prefix length in terms of words, return the number of bpe tokens"""
prefix = get_prefix_no_bpe(sentence, bpe_symbol, prefix_len)
assert len(remove_bpe(prefix, bpe_symbol).split()) <= prefix_len
return len(prefix.split(" "))
def make_right_to_left(line):
tokens = line.split()
tokens.reverse()
new_line = " ".join(tokens)
return new_line
def remove_bpe(line, bpe_symbol):
line = line.replace("\n", "")
line = (line + " ").replace(bpe_symbol, "").rstrip()
    return line + "\n"
def remove_bpe_dict(pred_dict, bpe_symbol):
new_dict = {}
for i in pred_dict:
if type(pred_dict[i]) == list:
new_list = [remove_bpe(elem, bpe_symbol) for elem in pred_dict[i]]
new_dict[i] = new_list
else:
new_dict[i] = remove_bpe(pred_dict[i], bpe_symbol)
return new_dict
def parse_bleu_scoring(line):
p = re.compile(r"(BLEU4 = )\d+[.]\d+")
res = re.search(p, line)
assert res is not None, line
return float(res.group()[8:])
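def _demo_parse_bleu_scoring():
    # Minimal illustrative sketch; the input line mimics the "BLEU4 = ..."
    # report format matched by the regex above.
    line = "BLEU4 = 27.40, 60.1/33.8/21.4/14.1 (BP=0.997, ratio=0.997)"
    assert parse_bleu_scoring(line) == 27.4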
def get_full_from_prefix(hypo_prefix, hypos):
"""given a hypo prefix, recover the first hypo from the list of complete hypos beginning with that prefix"""
for hypo in hypos:
hypo_prefix = hypo_prefix.strip("\n")
len_prefix = len(hypo_prefix)
if hypo[:len_prefix] == hypo_prefix:
return hypo
# no match found
raise Exception()
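def _demo_get_full_from_prefix():
    # Minimal illustrative sketch: recover the first complete hypothesis that
    # begins with the given prefix.
    hypos = ["a blue house\n", "a house by the sea\n"]
    assert get_full_from_prefix("a house", hypos) == "a house by the sea\n"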
def get_score(
a,
b,
c,
target_len,
bitext_score1,
bitext_score2=None,
lm_score=None,
lenpen=None,
src_len=None,
tgt_len=None,
bitext1_backwards=False,
bitext2_backwards=False,
normalize=False,
):
if bitext1_backwards:
bitext1_norm = src_len
else:
bitext1_norm = tgt_len
if bitext_score2 is not None:
if bitext2_backwards:
bitext2_norm = src_len
else:
bitext2_norm = tgt_len
else:
bitext2_norm = 1
bitext_score2 = 0
if normalize:
score = (
a * bitext_score1 / bitext1_norm
+ b * bitext_score2 / bitext2_norm
+ c * lm_score / src_len
)
else:
score = a * bitext_score1 + b * bitext_score2 + c * lm_score
if lenpen is not None:
score /= (target_len) ** float(lenpen)
return score
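def _demo_get_score():
    # Minimal worked example with made-up log-probabilities: the unnormalized
    # combination is 1.0 * -2.0 + 0.5 * -3.0 + 0.3 * -1.5 = -3.95, which is
    # then divided by the length penalty term 4 ** 1.0.
    score = get_score(
        a=1.0,
        b=0.5,
        c=0.3,
        target_len=4,
        bitext_score1=-2.0,
        bitext_score2=-3.0,
        lm_score=-1.5,
        lenpen=1.0,
        src_len=5,
        tgt_len=4,
    )
    assert abs(score - (-3.95 / 4)) < 1e-9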
class BitextOutput(object):
def __init__(
self,
output_file,
backwards,
right_to_left,
bpe_symbol,
prefix_len=None,
target_prefix_frac=None,
source_prefix_frac=None,
):
"""process output from rescoring"""
source, hypo, score, target, pos_score = reprocess(output_file)
if backwards:
self.hypo_fracs = source_prefix_frac
else:
self.hypo_fracs = target_prefix_frac
# remove length penalty so we can use raw scores
score, num_bpe_tokens = get_score_from_pos(
pos_score, prefix_len, hypo, bpe_symbol, self.hypo_fracs, backwards
)
source_lengths = {}
target_lengths = {}
assert hypo.keys() == source.keys(), "key mismatch"
if backwards:
tmp = hypo
hypo = source
source = tmp
for i in source:
# since we are reranking, there should only be one hypo per source sentence
if backwards:
len_src = len(source[i][0].split())
# record length without <eos>
if len_src == num_bpe_tokens[i][0] - 1:
source_lengths[i] = num_bpe_tokens[i][0] - 1
else:
source_lengths[i] = num_bpe_tokens[i][0]
target_lengths[i] = len(hypo[i].split())
source[i] = remove_bpe(source[i][0], bpe_symbol)
target[i] = remove_bpe(target[i], bpe_symbol)
hypo[i] = remove_bpe(hypo[i], bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
else:
len_tgt = len(hypo[i][0].split())
# record length without <eos>
if len_tgt == num_bpe_tokens[i][0] - 1:
target_lengths[i] = num_bpe_tokens[i][0] - 1
else:
target_lengths[i] = num_bpe_tokens[i][0]
source_lengths[i] = len(source[i].split())
if right_to_left:
source[i] = remove_bpe(make_right_to_left(source[i]), bpe_symbol)
target[i] = remove_bpe(make_right_to_left(target[i]), bpe_symbol)
hypo[i] = remove_bpe(make_right_to_left(hypo[i][0]), bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
else:
assert (
len(hypo[i]) == 1
), "expected only one hypothesis per source sentence"
source[i] = remove_bpe(source[i], bpe_symbol)
target[i] = remove_bpe(target[i], bpe_symbol)
hypo[i] = remove_bpe(hypo[i][0], bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
self.rescore_source = source
self.rescore_hypo = hypo
self.rescore_score = score
self.rescore_target = target
self.rescore_pos_score = pos_score
self.backwards = backwards
self.right_to_left = right_to_left
self.target_lengths = target_lengths
self.source_lengths = source_lengths
class BitextOutputFromGen(object):
def __init__(
self,
predictions_bpe_file,
bpe_symbol=None,
nbest=False,
prefix_len=None,
target_prefix_frac=None,
):
if nbest:
(
pred_source,
pred_hypo,
pred_score,
pred_target,
pred_pos_score,
) = reprocess_nbest(predictions_bpe_file)
else:
pred_source, pred_hypo, pred_score, pred_target, pred_pos_score = reprocess(
predictions_bpe_file
)
assert len(pred_source) == len(pred_hypo)
assert len(pred_source) == len(pred_score)
assert len(pred_source) == len(pred_target)
assert len(pred_source) == len(pred_pos_score)
# remove length penalty so we can use raw scores
pred_score, num_bpe_tokens = get_score_from_pos(
pred_pos_score, prefix_len, pred_hypo, bpe_symbol, target_prefix_frac, False
)
self.source = pred_source
self.target = pred_target
self.score = pred_score
self.pos_score = pred_pos_score
self.hypo = pred_hypo
self.target_lengths = {}
self.source_lengths = {}
self.no_bpe_source = remove_bpe_dict(pred_source.copy(), bpe_symbol)
self.no_bpe_hypo = remove_bpe_dict(pred_hypo.copy(), bpe_symbol)
self.no_bpe_target = remove_bpe_dict(pred_target.copy(), bpe_symbol)
# indexes to match those from the rescoring models
self.rescore_source = {}
self.rescore_target = {}
self.rescore_pos_score = {}
self.rescore_hypo = {}
self.rescore_score = {}
self.num_hypos = {}
self.backwards = False
self.right_to_left = False
index = 0
for i in sorted(pred_source.keys()):
for j in range(len(pred_hypo[i])):
self.target_lengths[index] = len(self.hypo[i][j].split())
self.source_lengths[index] = len(self.source[i].split())
self.rescore_source[index] = self.no_bpe_source[i]
self.rescore_target[index] = self.no_bpe_target[i]
self.rescore_hypo[index] = self.no_bpe_hypo[i][j]
self.rescore_score[index] = float(pred_score[i][j])
self.rescore_pos_score[index] = pred_pos_score[i][j]
self.num_hypos[index] = len(pred_hypo[i])
index += 1
def get_score_from_pos(
pos_score_dict, prefix_len, hypo_dict, bpe_symbol, hypo_frac, backwards
):
score_dict = {}
num_bpe_tokens_dict = {}
assert prefix_len is None or hypo_frac is None
for key in pos_score_dict:
score_dict[key] = []
num_bpe_tokens_dict[key] = []
for i in range(len(pos_score_dict[key])):
if prefix_len is not None and not backwards:
num_bpe_tokens = get_num_bpe_tokens_from_len(
hypo_dict[key][i], bpe_symbol, prefix_len
)
score_dict[key].append(sum(pos_score_dict[key][i][:num_bpe_tokens]))
num_bpe_tokens_dict[key].append(num_bpe_tokens)
elif hypo_frac is not None:
num_words, shortened, hypo_prefix_len = calc_length_from_frac(
hypo_dict[key][i], hypo_frac, bpe_symbol
)
score_dict[key].append(sum(pos_score_dict[key][i][:hypo_prefix_len]))
num_bpe_tokens_dict[key].append(hypo_prefix_len)
else:
score_dict[key].append(sum(pos_score_dict[key][i]))
num_bpe_tokens_dict[key].append(len(pos_score_dict[key][i]))
return score_dict, num_bpe_tokens_dict
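def _demo_get_score_from_pos():
    # Minimal illustrative sketch: with no prefix truncation the raw
    # hypothesis score is simply the sum of its positional scores, returned
    # alongside the hypothesis length in bpe tokens.
    pos_scores = {0: [[-0.1, -0.2, -0.3]]}
    hypos = {0: ["a small house"]}
    scores, num_bpe_tokens = get_score_from_pos(
        pos_scores, None, hypos, "@@ ", None, False
    )
    assert abs(scores[0][0] - (-0.6)) < 1e-9 and num_bpe_tokens[0] == [3]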
class LMOutput(object):
def __init__(
self,
lm_score_file,
lm_dict=None,
prefix_len=None,
bpe_symbol=None,
target_prefix_frac=None,
):
(
lm_sentences,
lm_sen_scores,
lm_sen_pos_scores,
lm_no_bpe_sentences,
lm_bpe_tokens,
) = parse_lm(
lm_score_file,
prefix_len=prefix_len,
bpe_symbol=bpe_symbol,
target_prefix_frac=target_prefix_frac,
)
self.sentences = lm_sentences
self.score = lm_sen_scores
self.pos_score = lm_sen_pos_scores
self.lm_dict = lm_dict
self.no_bpe_sentences = lm_no_bpe_sentences
self.bpe_tokens = lm_bpe_tokens
def parse_lm(input_file, prefix_len=None, bpe_symbol=None, target_prefix_frac=None):
"""parse output of eval_lm"""
with open(input_file, "r") as f:
text = f.readlines()
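    # the first 7 lines and the last 2 lines of the captured eval_lm output
    # are assumed to be logging/summary boilerplate rather than per-sentence
    # word probabilities, so they are stripped below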
text = text[7:]
cleaned_text = text[:-2]
sentences = {}
sen_scores = {}
sen_pos_scores = {}
no_bpe_sentences = {}
num_bpe_tokens_dict = {}
for _i, line in enumerate(cleaned_text):
tokens = line.split()
if tokens[0].isdigit():
line_id = int(tokens[0])
scores = [float(x[1:-1]) for x in tokens[2::2]]
sentences[line_id] = " ".join(tokens[1::2][:-1]) + "\n"
if bpe_symbol is not None:
# exclude <eos> symbol to match output from generate.py
bpe_sen = " ".join(tokens[1::2][:-1]) + "\n"
no_bpe_sen = remove_bpe(bpe_sen, bpe_symbol)
no_bpe_sentences[line_id] = no_bpe_sen
if prefix_len is not None:
num_bpe_tokens = get_num_bpe_tokens_from_len(
bpe_sen, bpe_symbol, prefix_len
)
sen_scores[line_id] = sum(scores[:num_bpe_tokens])
num_bpe_tokens_dict[line_id] = num_bpe_tokens
elif target_prefix_frac is not None:
num_words, shortened, target_prefix_len = calc_length_from_frac(
bpe_sen, target_prefix_frac, bpe_symbol
)
sen_scores[line_id] = sum(scores[:target_prefix_len])
num_bpe_tokens_dict[line_id] = target_prefix_len
else:
sen_scores[line_id] = sum(scores)
num_bpe_tokens_dict[line_id] = len(scores)
sen_pos_scores[line_id] = scores
return sentences, sen_scores, sen_pos_scores, no_bpe_sentences, num_bpe_tokens_dict
def get_directories(
data_dir_name,
num_rescore,
gen_subset,
fw_name,
shard_id,
num_shards,
sampling=False,
prefix_len=None,
target_prefix_frac=None,
source_prefix_frac=None,
):
nbest_file_id = (
"nbest_"
+ str(num_rescore)
+ "_subset_"
+ gen_subset
+ "_fw_name_"
+ fw_name
+ "_shard_"
+ str(shard_id)
+ "_of_"
+ str(num_shards)
)
if sampling:
nbest_file_id += "_sampling"
# the directory containing all information for this nbest list
pre_gen = (
os.path.join(os.path.dirname(__file__))
+ "/rerank_data/"
+ data_dir_name
+ "/"
+ nbest_file_id
)
# the directory to store the preprocessed nbest list, for left to right rescoring
left_to_right_preprocessed_dir = pre_gen + "/left_to_right_preprocessed"
if source_prefix_frac is not None:
left_to_right_preprocessed_dir = (
left_to_right_preprocessed_dir + "/prefix_frac" + str(source_prefix_frac)
)
# the directory to store the preprocessed nbest list, for right to left rescoring
right_to_left_preprocessed_dir = pre_gen + "/right_to_left_preprocessed"
# the directory to store the preprocessed nbest list, for backwards rescoring
backwards_preprocessed_dir = pre_gen + "/backwards"
if target_prefix_frac is not None:
backwards_preprocessed_dir = (
backwards_preprocessed_dir + "/prefix_frac" + str(target_prefix_frac)
)
elif prefix_len is not None:
backwards_preprocessed_dir = (
backwards_preprocessed_dir + "/prefix_" + str(prefix_len)
)
# the directory to store the preprocessed nbest list, for rescoring with P(T)
lm_preprocessed_dir = pre_gen + "/lm_preprocessed"
return (
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
)
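def _demo_get_directories():
    # Minimal illustrative sketch: the nbest-list configuration is encoded
    # directly in the directory names, rooted next to this file under
    # rerank_data/.
    dirs = get_directories("wmt", 10, "test", "fw", 0, 1)
    assert dirs[0].endswith(
        "/rerank_data/wmt/nbest_10_subset_test_fw_name_fw_shard_0_of_1"
    )
    assert dirs[1] == dirs[0] + "/left_to_right_preprocessed"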
def lm_scoring(
preprocess_directory,
bpe_status,
gen_output,
pre_gen,
cur_lm_dict,
cur_lm_name,
cur_language_model,
cur_lm_bpe_code,
batch_size,
lm_score_file,
target_lang,
source_lang,
prefix_len=None,
):
if prefix_len is not None:
assert (
bpe_status == "different"
), "bpe status must be different to use prefix len"
if bpe_status == "no bpe":
# run lm on output without bpe
write_reprocessed(
gen_output.no_bpe_source,
gen_output.no_bpe_hypo,
gen_output.no_bpe_target,
pre_gen + "/rescore_data_no_bpe.de",
pre_gen + "/rescore_data_no_bpe.en",
pre_gen + "/reference_file_no_bpe",
)
preprocess_lm_param = [
"--only-source",
"--trainpref",
pre_gen + "/rescore_data_no_bpe." + target_lang,
"--srcdict",
cur_lm_dict,
"--destdir",
preprocess_directory,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [
preprocess_directory,
"--path",
cur_language_model,
"--output-word-probs",
"--batch-size",
str(batch_size),
"--max-tokens",
"1024",
"--sample-break-mode",
"eos",
"--gen-subset",
"train",
]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, "w") as f:
with redirect_stdout(f):
eval_lm.main(input_args)
elif bpe_status == "shared":
preprocess_lm_param = [
"--only-source",
"--trainpref",
pre_gen + "/rescore_data." + target_lang,
"--srcdict",
cur_lm_dict,
"--destdir",
preprocess_directory,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [
preprocess_directory,
"--path",
cur_language_model,
"--output-word-probs",
"--batch-size",
str(batch_size),
"--sample-break-mode",
"eos",
"--gen-subset",
"train",
]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, "w") as f:
with redirect_stdout(f):
eval_lm.main(input_args)
elif bpe_status == "different":
rescore_file = pre_gen + "/rescore_data_no_bpe"
rescore_bpe = pre_gen + "/rescore_data_new_bpe"
rescore_file += "."
rescore_bpe += "."
write_reprocessed(
gen_output.no_bpe_source,
gen_output.no_bpe_hypo,
gen_output.no_bpe_target,
rescore_file + source_lang,
rescore_file + target_lang,
pre_gen + "/reference_file_no_bpe",
bpe_symbol=None,
)
# apply LM bpe to nbest list
bpe_src_param = [
"-c",
cur_lm_bpe_code,
"--input",
rescore_file + target_lang,
"--output",
rescore_bpe + target_lang,
]
subprocess.call(
[
"python",
os.path.join(
os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py"
),
]
+ bpe_src_param,
shell=False,
)
# uncomment to use fastbpe instead of subword-nmt bpe
# bpe_src_param = [rescore_bpe+target_lang, rescore_file+target_lang, cur_lm_bpe_code]
# subprocess.call(["/private/home/edunov/fastBPE/fast", "applybpe"] + bpe_src_param, shell=False)
preprocess_dir = preprocess_directory
preprocess_lm_param = [
"--only-source",
"--trainpref",
rescore_bpe + target_lang,
"--srcdict",
cur_lm_dict,
"--destdir",
preprocess_dir,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [
preprocess_dir,
"--path",
cur_language_model,
"--output-word-probs",
"--batch-size",
str(batch_size),
"--max-tokens",
"1024",
"--sample-break-mode",
"eos",
"--gen-subset",
"train",
]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, "w") as f:
with redirect_stdout(f):
eval_lm.main(input_args)
def rescore_file_name(
nbest_dir,
prefix_len,
scorer_name,
lm_file=False,
target_prefix_frac=None,
source_prefix_frac=None,
backwards=None,
):
if lm_file:
score_file = nbest_dir + "/lm_score_translations_model_" + scorer_name + ".txt"
else:
score_file = nbest_dir + "/" + scorer_name + "_score_translations.txt"
if backwards:
if prefix_len is not None:
score_file += "prefix_len" + str(prefix_len)
elif target_prefix_frac is not None:
score_file += "target_prefix_frac" + str(target_prefix_frac)
else:
if source_prefix_frac is not None:
score_file += "source_prefix_frac" + str(source_prefix_frac)
return score_file
| 28,678 | 32.700353 | 112 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/ulm/sample.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Sample from a trained LM; hacked fairseq-interactive
"""
from collections import namedtuple
import os
import ast
import numpy as np
from fairseq import checkpoint_utils, options, tasks, utils
import tqdm
Batch = namedtuple('Batch', 'ids src_tokens src_lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def make_batches(lines, args, task, max_positions):
tokens = [
task.source_dictionary.encode_line(
src_str, add_if_not_exist=False
).long()
for src_str in lines
]
lengths = [t.numel() for t in tokens]
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(tokens, lengths),
max_tokens=args.dataset.max_tokens,
max_sentences=args.dataset.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=args.dataset.skip_invalid_size_inputs_valid_test
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
ids=batch['id'],
src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths'],
)
def main(args):
arg_prompts = args.prompts
arg_output = args.output
arg_debug = args.debug
arg_sample_size = args.samples_per_prompt
try:
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
args = convert_namespace_to_omegaconf(args)
    except Exception:
pass
# if args.max_tokens is None and args.max_sentences is None:
if args.common.seed is not None:
np.random.seed(args.common.seed)
utils.set_torch_seed(args.common.seed)
if args.generation.sampling:
args.generation.nbest = args.generation.beam = arg_sample_size
task = tasks.setup_task(args.task)
overrides = ast.literal_eval(args.common_eval.model_overrides)
models, _model_args = checkpoint_utils.load_model_ensemble(
args.common_eval.path.split(os.pathsep),
arg_overrides=overrides,
task=task,
suffix=getattr(args, "checkpoint_suffix", ""),
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
model.prepare_for_inference_(args)
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.generation.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
)
output_file = open(arg_output, 'w')
with open(arg_prompts, 'r') as fin:
lines = fin.readlines()
split = [x.split('|', 1) for x in lines]
seq_id = [x[0] for x in split]
prompts = [x[1] for x in split]
if args.generation.prefix_size >= 0:
prompts = [' '.join(l.split()[:args.generation.prefix_size])
for l in prompts]
if arg_debug:
prompts = prompts[:10]
generator = task.build_generator(models, args.generation)
start_id = 0
pbar = tqdm.tqdm(total=len(prompts))
for batch in make_batches(prompts, args, task, max_positions):
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
sample = {
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
},
}
results = []
translations = task.inference_step(generator, models, sample)
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
results.append((i + start_id, src_tokens_i, hypos))
# sort output to match input order
for id, src_tokens, hypos in sorted(results, key=lambda x: x[0]):
if src_dict is not None:
src_str = src_dict.string(
src_tokens, args.common_eval.post_process)
# Process top predictions
for hypo_id, hypo in enumerate(hypos):
_hypo_tokens, hypo_str, _alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.common_eval.post_process,
)
detok_hypo_str = hypo_str
utterance = detok_hypo_str
print(f'{seq_id[id]}__{hypo_id}|{utterance}', file=output_file)
pbar.update(1)
start_id += len(results)
    output_file.close()
def cli_main():
parser = options.get_interactive_generation_parser()
parser.add_argument('--prompts', type=str, default=None, required=True)
parser.add_argument('--output', type=str, default=None, required=True)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--samples-per-prompt', type=int, default=1)
args = options.parse_args_and_arch(parser)
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
main(args)
if __name__ == '__main__':
cli_main()
| 5,623 | 31.137143 | 103 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/tools/resynthesize_speech.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import logging
import os
import joblib
import soundfile as sf
import torch
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_feature_reader
from examples.textless_nlp.gslm.unit2speech.tts_data import TacotronInputDataset
from examples.textless_nlp.gslm.unit2speech.utils import (
load_tacotron,
load_waveglow,
synthesize_audio,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(description="GSLM U2S tool")
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
required=True,
help="Acoustic feature type",
)
parser.add_argument(
"--acoustic_model_path",
type=str,
help="Pretrained acoustic model checkpoint",
)
parser.add_argument("--layer", type=int, help="Layer of acoustic model")
parser.add_argument(
"--kmeans_model_path",
type=str,
required=True,
help="K-means model file path to use for inference",
)
parser.add_argument(
"--tts_model_path",
type=str,
help="TTS model file path to use for inference",
)
parser.add_argument(
"--code_dict_path",
type=str,
help="Code dict file path to use for inference",
)
parser.add_argument(
"--waveglow_path",
type=str,
help="Waveglow (vocoder) model file path to use for inference",
)
parser.add_argument("--max_decoder_steps", type=int, default=2000)
parser.add_argument("--denoiser_strength", type=float, default=0.1)
return parser
################################################
def main(args, logger):
# Acoustic Model
logger.info(f"Loading acoustic model from {args.tts_model_path}...")
feature_reader_cls = get_feature_reader(args.feature_type)
reader = feature_reader_cls(
checkpoint_path=args.acoustic_model_path, layer=args.layer
)
# K-means Model
logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
kmeans_model.verbose = False
# TTS Model
logger.info(f"Loading TTS model from {args.tts_model_path}...")
tacotron_model, sample_rate, hparams = load_tacotron(
tacotron_model_path=args.tts_model_path,
max_decoder_steps=args.max_decoder_steps,
)
# Waveglow Model
logger.info(f"Loading Waveglow model from {args.waveglow_path}...")
waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path)
# Dataset
if not os.path.exists(hparams.code_dict):
hparams.code_dict = args.code_dict_path
tts_dataset = TacotronInputDataset(hparams)
iters = 0
while True:
in_file_path = input("Input: Enter the full file path of audio file...\n")
out_file_path = input("Output: Enter the full file path of audio file...\n")
feats = reader.get_feats(in_file_path).cpu().numpy()
iters += 1
        if iters % 1000 == 0:
gc.collect()
torch.cuda.empty_cache()
quantized_units = kmeans_model.predict(feats)
quantized_units_str = " ".join(map(str, quantized_units))
tts_input = tts_dataset.get_tensor(quantized_units_str)
mel, aud, aud_dn, has_eos = synthesize_audio(
tacotron_model,
waveglow,
denoiser,
tts_input.unsqueeze(0),
strength=args.denoiser_strength,
)
sf.write(f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate)
logger.info("Resynthesis done!\n")
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| 4,183 | 30.458647 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tts_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from examples.textless_nlp.gslm.unit2speech.tacotron2.text import (
EOS_TOK,
SOS_TOK,
code_to_sequence,
text_to_sequence,
)
from examples.textless_nlp.gslm.unit2speech.tacotron2.utils import (
load_code_dict,
)
class TacotronInputDataset:
def __init__(self, hparams, append_str=""):
self.is_text = getattr(hparams, "text_or_code", "text") == "text"
if not self.is_text:
self.code_dict = load_code_dict(
hparams.code_dict, hparams.add_sos, hparams.add_eos
)
self.code_key = hparams.code_key
self.add_sos = hparams.add_sos
self.add_eos = hparams.add_eos
self.collapse_code = hparams.collapse_code
self.append_str = append_str
def process_code(self, inp_str):
inp_toks = inp_str.split()
if self.add_sos:
inp_toks = [SOS_TOK] + inp_toks
if self.add_eos:
inp_toks = inp_toks + [EOS_TOK]
return code_to_sequence(inp_toks, self.code_dict, self.collapse_code)
def process_text(self, inp_str):
return text_to_sequence(inp_str, ["english_cleaners"])
def get_tensor(self, inp_str):
# uid, txt, inp_str = self._get_data(idx)
inp_str = inp_str + self.append_str
if self.is_text:
inp_toks = self.process_text(inp_str)
else:
inp_toks = self.process_code(inp_str)
return torch.from_numpy(np.array(inp_toks)).long()
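    # NOTE: `self.data` is never assigned anywhere in this class, so calling
    # len() on a TacotronInputDataset raises AttributeError; __len__ appears
    # to be vestigial from an earlier dataset implementation.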
def __len__(self):
return len(self.data)
| 1,733 | 30.527273 | 77 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/multiproc.py
|
import os
import time
import torch
import sys
import subprocess
argslist = list(sys.argv)[1:]
log_dir = argslist[-1]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))
print("GPU log directory is {}".format(log_dir))
os.makedirs(log_dir, exist_ok=True)
for i in range(num_gpus):
argslist.append('--rank={}'.format(i))
stdout = None if i == 0 else open("{}/{}_GPU_{}.log".format(log_dir, job_id, i),
"w")
print(argslist)
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
argslist = argslist[:-1]
for p in workers:
p.wait()
| 772 | 26.607143 | 84 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from examples.textless_nlp.gslm.unit2speech.tacotron2.model import Tacotron2
from examples.textless_nlp.gslm.unit2speech.tacotron2.waveglow_denoiser import (
Denoiser,
)
def load_quantized_audio_from_file(file_path):
base_fname_batch, quantized_units_batch = [], []
with open(file_path) as f:
for line in f:
base_fname, quantized_units_str = line.rstrip().split("|")
quantized_units = [int(q) for q in quantized_units_str.split(" ")]
base_fname_batch.append(base_fname)
quantized_units_batch.append(quantized_units)
return base_fname_batch, quantized_units_batch
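def _demo_load_quantized_audio_from_file():
    # Minimal illustrative sketch: each input line is
    # "<base filename>|<space-separated unit ids>"; the sample content is
    # made up for the example.
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("utt1|3 14 15\nutt2|9 26\n")
        tmp_name = f.name
    fnames, units = load_quantized_audio_from_file(tmp_name)
    os.remove(tmp_name)
    assert fnames == ["utt1", "utt2"] and units == [[3, 14, 15], [9, 26]]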
def synthesize_audio(model, waveglow, denoiser, inp, lab=None, strength=0.0):
assert inp.size(0) == 1
inp = inp.cuda()
if lab is not None:
lab = torch.LongTensor(1).cuda().fill_(lab)
with torch.no_grad():
_, mel, _, ali, has_eos = model.inference(inp, lab, ret_has_eos=True)
aud = waveglow.infer(mel, sigma=0.666)
aud_dn = denoiser(aud, strength=strength).squeeze(1)
return mel, aud, aud_dn, has_eos
def load_tacotron(tacotron_model_path, max_decoder_steps):
ckpt_dict = torch.load(tacotron_model_path)
hparams = ckpt_dict["hparams"]
hparams.max_decoder_steps = max_decoder_steps
sr = hparams.sampling_rate
model = Tacotron2(hparams)
model.load_state_dict(ckpt_dict["model_dict"])
model = model.cuda().eval().half()
return model, sr, hparams
def load_waveglow(waveglow_path):
waveglow = torch.load(waveglow_path)["model"]
waveglow = waveglow.cuda().eval().half()
for k in waveglow.convinv:
k.float()
denoiser = Denoiser(waveglow)
return waveglow, denoiser
| 1,904 | 33.017857 | 80 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/glow.py
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import copy
import torch
from torch.autograd import Variable
import torch.nn.functional as F
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a+input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
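def _demo_fused_add_tanh_sigmoid_multiply():
    # Minimal illustrative sketch with made-up shapes: the fused op is the
    # WaveNet-style gate tanh(x) * sigmoid(y) over the two channel halves of
    # the summed inputs.
    input_a = torch.randn(2, 8, 5)
    input_b = torch.randn(2, 8, 5)
    n_channels = torch.IntTensor([4])
    acts = fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels)
    in_act = input_a + input_b
    expected = torch.tanh(in_act[:, :4, :]) * torch.sigmoid(in_act[:, 4:, :])
    assert torch.allclose(acts, expected)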
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output):
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total - log_det_W_total
return loss/(z.size(0)*z.size(1)*z.size(2))
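# Note: the loss above is the negative log-likelihood of the latent z under a
# zero-mean Gaussian prior with scale sigma, minus the change-of-variables
# terms accumulated by the affine couplings (log_s) and the invertible 1x1
# convolutions (log_det_W), averaged per element; the constant terms of the
# Gaussian density are dropped since they do not affect training.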
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If reverse=True it does convolution with
inverse
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:,0] = -1*W[:,0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, reverse=False):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W)
z = self.conv(z)
return z, log_det_W
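# Hedged usage sketch (added for illustration): the forward and reverse passes
# are exact inverses of each other, up to floating-point error.
#
#   conv = Invertible1x1Conv(8)
#   z = torch.randn(2, 8, 100)          # (batch, group_size, n_of_groups)
#   z_fwd, log_det_W = conv(z)          # forward: conv + log-determinant
#   z_rec = conv(z_fwd, reverse=True)   # reverse: multiply by W^{-1}
#   assert torch.allclose(z, z_rec, atol=1e-4)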
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary difference
from WaveNet is the convolutions need not be causal. There is also no dilation
size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size*dilation - dilation)/2)
in_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2*n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
output = torch.zeros_like(audio)
n_channels_tensor = torch.IntTensor([self.n_channels])
spect = self.cond_layer(spect)
for i in range(self.n_layers):
spect_offset = i*2*self.n_channels
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
spect[:,spect_offset:spect_offset+2*self.n_channels,:],
n_channels_tensor)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = audio + res_skip_acts[:,:self.n_channels,:]
output = output + res_skip_acts[:,self.n_channels:,:]
else:
output = output + res_skip_acts
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group/2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size/2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels*n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels # Useful during inference
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:,:self.n_early_size,:])
audio = audio[:,self.n_early_size:,:]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1)/2)
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s)*audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1],1)
output_audio.append(audio)
return torch.cat(output_audio,1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
        # trim transposed-conv edge artifacts (alternatively, pad the spec to a kernel multiple)
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
if spect.type() == 'torch.cuda.HalfTensor':
audio = torch.cuda.HalfTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
else:
audio = torch.cuda.FloatTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
audio = torch.autograd.Variable(sigma*audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1)/2)
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b)/torch.exp(s)
audio = torch.cat([audio_0, audio_1],1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
if spect.type() == 'torch.cuda.HalfTensor':
z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
else:
z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
audio = torch.cat((sigma*z, audio),1)
audio = audio.permute(0,2,1).contiguous().view(audio.size(0), -1).data
return audio
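    # Hedged inference sketch (added for illustration; the hyperparameters are
    # the commonly published WaveGlow settings, assumed here, and CUDA is
    # required because infer() allocates torch.cuda tensors):
    #
    #   wn_config = dict(n_layers=8, n_channels=256, kernel_size=3)
    #   waveglow = WaveGlow(n_mel_channels=80, n_flows=12, n_group=8,
    #                       n_early_every=4, n_early_size=2, WN_config=wn_config)
    #   waveglow = WaveGlow.remove_weightnorm(waveglow).cuda().eval()
    #   with torch.no_grad():
    #       audio = waveglow.infer(mel.cuda(), sigma=0.666)  # mel: (1, 80, T)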
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
| 12,653 | 39.557692 | 105 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/synthesize_audio_from_units.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import soundfile as sf
from examples.textless_nlp.gslm.unit2speech.tts_data import (
TacotronInputDataset,
)
from examples.textless_nlp.gslm.unit2speech.utils import (
load_quantized_audio_from_file,
load_tacotron,
load_waveglow,
synthesize_audio,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(
        description="GSLM unit-to-speech synthesizer."
)
parser.add_argument(
"--quantized_unit_path",
type=str,
        help="Quantized unit file path to use for inference",
)
parser.add_argument(
"--tts_model_path",
type=str,
help="TTS model file path to use for inference",
)
parser.add_argument(
"--waveglow_path",
type=str,
help="Path to the waveglow checkpoint (vocoder).",
)
parser.add_argument(
"--code_dict_path",
type=str,
help="Code dict file path to use for inference",
)
parser.add_argument("--max_decoder_steps", type=int, default=2000)
parser.add_argument("--denoiser_strength", type=float, default=0.1)
parser.add_argument(
"--out_audio_dir",
type=str,
help="Output directory to dump audio files",
)
return parser
def main(args, logger):
# Load quantized audio
logger.info(f"Loading quantized audio from {args.quantized_unit_path}...")
names_batch, quantized_units_batch = load_quantized_audio_from_file(
file_path=args.quantized_unit_path
)
logger.info(f"Loading TTS model from {args.tts_model_path}...")
tacotron_model, sample_rate, hparams = load_tacotron(
tacotron_model_path=args.tts_model_path,
max_decoder_steps=args.max_decoder_steps,
)
logger.info(f"Loading Waveglow model from {args.waveglow_path}...")
waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path)
if not os.path.exists(hparams.code_dict):
hparams.code_dict = args.code_dict_path
tts_dataset = TacotronInputDataset(hparams)
for name, quantized_units in zip(names_batch, quantized_units_batch):
quantized_units_str = " ".join(map(str, quantized_units))
tts_input = tts_dataset.get_tensor(quantized_units_str)
mel, aud, aud_dn, has_eos = synthesize_audio(
tacotron_model,
waveglow,
denoiser,
tts_input.unsqueeze(0),
strength=args.denoiser_strength,
)
out_file_path = os.path.join(args.out_audio_dir, f"{name}.wav")
sf.write(
f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate
)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| 3,178 | 28.990566 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/convert_to_16k.py
|
import os
import shlex
import subprocess
import progressbar
from time import time
from pathlib import Path
def find_all_files(path_dir, extension):
out = []
for root, dirs, filenames in os.walk(path_dir):
for f in filenames:
if f.endswith(extension):
out.append(((str(Path(f).stem)), os.path.join(root, f)))
return out
def convert16k(inputfile, outputfile16k):
command = ('sox -c 1 -b 16 {} -t wav {} rate 16k'.format(inputfile, outputfile16k))
subprocess.call(shlex.split(command))
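# Hedged example (paths are placeholders): the sox command built above is
#   sox -c 1 -b 16 input.mp3 -t wav output.wav rate 16k
# i.e. mono, 16-bit PCM, resampled to 16 kHz.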
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='Convert audio to 16 kHz mono wav using sox.')
parser.add_argument('input_dir', type=str,
help='Path to the input dir.')
parser.add_argument('output_dir', type=str,
help='Path to the output dir.')
parser.add_argument('--extension', type=str, default='wav',
                        help='Audio file extension in the input. Default: wav')
args = parser.parse_args()
# Find all sequences
print(f"Finding all audio files with extension '{args.extension}' from {args.input_dir}...")
audio_files = find_all_files(args.input_dir, args.extension)
print(f"Done! Found {len(audio_files)} files.")
# Convert to relative path
audio_files = [os.path.relpath(file[-1], start=args.input_dir) for file in audio_files]
# Create all the directories needed
rel_dirs_set = set([os.path.dirname(file) for file in audio_files])
for rel_dir in rel_dirs_set:
Path(os.path.join(args.output_dir, rel_dir)).mkdir(parents=True, exist_ok=True)
# Converting wavs files
print("Converting the audio to wav files...")
bar = progressbar.ProgressBar(maxval=len(audio_files))
bar.start()
start_time = time()
for index, file in enumerate(audio_files):
bar.update(index)
input_file = os.path.join(args.input_dir, file)
output_file = os.path.join(args.output_dir, os.path.splitext(file)[0]+".wav")
convert16k(input_file, output_file)
bar.finish()
print(f"...done {len(audio_files)} files in {time()-start_time} seconds.")
| 2,177 | 37.892857 | 96 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/stft.py
|
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from .audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
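# Hedged round-trip sketch (added for illustration; runs on CPU):
#
#   stft = STFT(filter_length=800, hop_length=200, win_length=800)
#   audio = torch.randn(1, 16000)     # (batch, samples), roughly in [-1, 1]
#   mag, phase = stft.transform(audio)
#   recon = stft.inverse(mag, phase)  # ~reconstructs audio, minus edge trimming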
| 5,893 | 40.507042 | 97 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/cleaners.py
|
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
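# Hedged example of the full English pipeline (added for illustration):
#
#   english_cleaners('Mr. Smith spent $3.50.')
#   # -> 'mister smith spent three dollars, fifty cents.'
#   # (numbers are expanded before abbreviations, then whitespace collapsed)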
| 2,439 | 25.813187 | 95 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import io
import json
import librosa
import numpy as np
import soundfile as sf
import time
import torch
from scipy.io.wavfile import read
from .text import SOS_TOK, EOS_TOK
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, out=torch.cuda.LongTensor(max_len))
mask = (ids < lengths.unsqueeze(1))
return mask
def load_wav_to_torch(full_path, sr=None):
data, sr = librosa.load(full_path, sr=sr)
data = np.clip(data, -1, 1) # potentially out of [-1, 1] due to resampling
data = data * 32768.0 # match values loaded by scipy
return torch.FloatTensor(data.astype(np.float32)), sr
def read_binary_audio(bin_data, tar_sr=None):
"""
read binary audio (`bytes` or `uint8` `numpy.ndarray`) to `float32`
`numpy.ndarray`
RETURNS:
data (np.ndarray) : audio of shape (n,) or (2, n)
tar_sr (int) : sample rate
"""
data, ori_sr = sf.read(io.BytesIO(bin_data), dtype='float32')
data = data.T
if (tar_sr is not None) and (ori_sr != tar_sr):
data = librosa.resample(data, ori_sr, tar_sr)
else:
tar_sr = ori_sr
data = np.clip(data, -1, 1)
data = data * 32768.0
return torch.FloatTensor(data.astype(np.float32)), tar_sr
def load_filepaths_and_text(filename):
with open(filename, encoding='utf-8') as f:
data = [json.loads(line.rstrip()) for line in f]
return data
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
def load_code_dict(path, add_sos=False, add_eos=False):
if not path:
return {}
with open(path, 'r') as f:
codes = ['_'] + [line.rstrip() for line in f] # '_' for pad
code_dict = {c: i for i, c in enumerate(codes)}
if add_sos:
code_dict[SOS_TOK] = len(code_dict)
if add_eos:
code_dict[EOS_TOK] = len(code_dict)
assert(set(code_dict.values()) == set(range(len(code_dict))))
return code_dict
def load_obs_label_dict(path):
if not path:
return {}
with open(path, 'r') as f:
obs_labels = [line.rstrip() for line in f]
return {c: i for i, c in enumerate(obs_labels)}
# A simple timer class inspired from `tnt.TimeMeter`
class CudaTimer:
def __init__(self, keys):
self.keys = keys
self.reset()
def start(self, key):
s = torch.cuda.Event(enable_timing=True)
s.record()
self.start_events[key].append(s)
return self
def stop(self, key):
e = torch.cuda.Event(enable_timing=True)
e.record()
self.end_events[key].append(e)
return self
def reset(self):
self.start_events = collections.defaultdict(list)
self.end_events = collections.defaultdict(list)
self.running_times = collections.defaultdict(float)
self.n = collections.defaultdict(int)
return self
def value(self):
self._synchronize()
return {k: self.running_times[k] / self.n[k] for k in self.keys}
def _synchronize(self):
torch.cuda.synchronize()
for k in self.keys:
starts = self.start_events[k]
ends = self.end_events[k]
if len(starts) == 0:
raise ValueError("Trying to divide by zero in TimeMeter")
if len(ends) != len(starts):
raise ValueError("Call stop before checking value!")
time = 0
for start, end in zip(starts, ends):
time += start.elapsed_time(end)
self.running_times[k] += time * 1e-3
self.n[k] += len(starts)
self.start_events = collections.defaultdict(list)
self.end_events = collections.defaultdict(list)
# Used to measure the time taken for multiple events
class Timer:
def __init__(self, keys):
self.keys = keys
self.n = {}
self.running_time = {}
self.total_time = {}
self.reset()
def start(self, key):
self.running_time[key] = time.time()
return self
def stop(self, key):
self.total_time[key] = time.time() - self.running_time[key]
self.n[key] += 1
self.running_time[key] = None
return self
def reset(self):
for k in self.keys:
self.total_time[k] = 0
self.running_time[k] = None
self.n[k] = 0
return self
def value(self):
vals = {}
for k in self.keys:
if self.n[k] == 0:
raise ValueError("Trying to divide by zero in TimeMeter")
else:
vals[k] = self.total_time[k] / self.n[k]
return vals
| 4,918 | 27.598837 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/model.py
|
from math import sqrt
import torch
import torch.distributions as distr
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from .layers import ConvNorm, LinearNorm, GlobalAvgPool
from .utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
attention_location_n_filters, attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
class Postnet(nn.Module):
"""Postnet
    - Five 1-d convolutions with 512 channels and kernel size 5
"""
def __init__(self, hparams):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
for i in range(1, hparams.postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim,
hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(hparams.n_mel_channels))
)
def forward(self, x):
for i in range(len(self.convolutions) - 1):
x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training)
x = F.dropout(self.convolutions[-1](x), 0.5, self.training)
return x
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, hparams):
super(Encoder, self).__init__()
convolutions = []
for _ in range(hparams.encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(hparams.encoder_embedding_dim,
hparams.encoder_embedding_dim,
kernel_size=hparams.encoder_kernel_size, stride=1,
padding=int((hparams.encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(hparams.encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(hparams.encoder_embedding_dim,
int(hparams.encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
def forward(self, x, input_lengths):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
        # PyTorch tensors are not reversible, hence the conversion to numpy
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
def inference(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class AudioEncoder(nn.Module):
def __init__(self, hparams):
super(AudioEncoder, self).__init__()
assert hparams.lat_dim > 0
convolutions = []
inp_dim = hparams.n_mel_channels
for _ in range(hparams.lat_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(inp_dim, hparams.lat_n_filters,
kernel_size=hparams.lat_kernel_size, stride=1,
padding=int((hparams.lat_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.lat_n_filters))
inp_dim = hparams.lat_n_filters
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(hparams.lat_n_filters,
int(hparams.lat_n_filters / 2),
hparams.lat_n_blstms, batch_first=True,
bidirectional=True)
self.pool = GlobalAvgPool()
self.mu_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim)
self.logvar_proj = LinearNorm(hparams.lat_n_filters, hparams.lat_dim)
self.lat_dim = hparams.lat_dim
def forward(self, x, lengths):
"""
Args:
x (torch.Tensor): (B, F, T)
"""
for conv in self.convolutions:
            x = F.dropout(torch.tanh(conv(x)), 0.5, self.training)  # F.tanh is deprecated
x = x.transpose(1, 2) # (B, T, D)
# x may not be sorted by length. Sort->process->unsort
max_len = x.size(1)
assert max_len == torch.max(lengths).item()
lengths, perm_idx = lengths.sort(0, descending=True)
x = x[perm_idx]
x = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
_, unperm_idx = perm_idx.sort(0)
outputs = outputs[unperm_idx] # (B, T, D)
        lengths = lengths[unperm_idx]  # (B,)
outputs = self.pool(outputs, lengths) # (B, D)
mu = self.mu_proj(outputs)
logvar = self.logvar_proj(outputs)
        # Normal expects a standard deviation, not a log-variance
        z = distr.Normal(mu, torch.exp(0.5 * logvar)).rsample()
return z, mu, logvar
class Decoder(nn.Module):
def __init__(self, hparams):
super(Decoder, self).__init__()
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.encoder_embedding_dim = hparams.encoder_embedding_dim
self.obs_dim = hparams.obs_dim
self.lat_dim = hparams.lat_dim
self.attention_rnn_dim = hparams.attention_rnn_dim
self.decoder_rnn_dim = hparams.decoder_rnn_dim
self.prenet_dim = hparams.prenet_dim
self.max_decoder_steps = hparams.max_decoder_steps
self.gate_threshold = hparams.gate_threshold
self.p_attention_dropout = hparams.p_attention_dropout
self.p_decoder_dropout = hparams.p_decoder_dropout
self.prenet = Prenet(
hparams.n_mel_channels * hparams.n_frames_per_step,
[hparams.prenet_dim, hparams.prenet_dim])
self.attention_rnn = nn.LSTMCell(
hparams.prenet_dim + hparams.encoder_embedding_dim,
hparams.attention_rnn_dim)
self.attention_layer = Attention(
hparams.attention_rnn_dim, hparams.encoder_embedding_dim,
hparams.attention_dim, hparams.attention_location_n_filters,
hparams.attention_location_kernel_size)
encoder_tot_dim = (hparams.encoder_embedding_dim + \
hparams.lat_dim + hparams.obs_dim)
self.decoder_rnn = nn.LSTMCell(
hparams.attention_rnn_dim + encoder_tot_dim,
hparams.decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
hparams.decoder_rnn_dim + encoder_tot_dim,
hparams.n_mel_channels * hparams.n_frames_per_step)
self.gate_layer = LinearNorm(
hparams.decoder_rnn_dim + encoder_tot_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: decoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
decoder_input = Variable(memory.data.new(
B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
def initialize_decoder_states(self, memory, obs_and_lat, mask):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
obs_and_lat: Observed and latent attribute embeddings
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
self.attention_hidden = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.attention_cell = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.decoder_hidden = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.attention_weights = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_weights_cum = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(
B, self.encoder_embedding_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.obs_and_lat = obs_and_lat
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
        gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B) -> (B, T_out)
gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
gate_outputs = gate_outputs.contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(
mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell))
self.attention_hidden = F.dropout(
self.attention_hidden, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1),
self.attention_weights_cum.unsqueeze(1)), dim=1)
self.attention_context, self.attention_weights = self.attention_layer(
self.attention_hidden, self.memory, self.processed_memory,
attention_weights_cat, self.mask)
self.attention_weights_cum += self.attention_weights
decoder_input = torch.cat(
(self.attention_hidden, self.attention_context), -1)
if self.obs_and_lat is not None:
decoder_input = torch.cat((decoder_input, self.obs_and_lat), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
decoder_input, (self.decoder_hidden, self.decoder_cell))
self.decoder_hidden = F.dropout(
self.decoder_hidden, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(self.decoder_hidden, self.attention_context), dim=1)
if self.obs_and_lat is not None:
decoder_hidden_attention_context = torch.cat(
(decoder_hidden_attention_context, self.obs_and_lat), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return decoder_output, gate_prediction, self.attention_weights
def forward(self, memory, obs_and_lat, decoder_inputs, memory_lengths):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
obs_and_lat: Observed and latent attribute embeddings
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
self.initialize_decoder_states(
memory, obs_and_lat, mask=~get_mask_from_lengths(memory_lengths))
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
mel_output, gate_output, attention_weights = self.decode(
decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output.squeeze()]
alignments += [attention_weights]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def inference(self, memory, obs_and_lat, ret_has_eos=False):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
obs_and_lat: Observed and latent attribute embeddings
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, obs_and_lat, mask=None)
mel_outputs, gate_outputs, alignments = [], [], []
has_eos = False
while True:
decoder_input = self.prenet(decoder_input)
mel_output, gate_output, alignment = self.decode(decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output]
alignments += [alignment]
if torch.sigmoid(gate_output.data) > self.gate_threshold:
has_eos = True
break
elif len(mel_outputs) == self.max_decoder_steps:
# print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
if ret_has_eos:
return mel_outputs, gate_outputs, alignments, has_eos
else:
return mel_outputs, gate_outputs, alignments
class Tacotron2(nn.Module):
def __init__(self, hparams):
super(Tacotron2, self).__init__()
self.mask_padding = hparams.mask_padding
self.fp16_run = hparams.fp16_run
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
# initialize text encoder embedding
self.embedding = nn.Embedding(
hparams.n_symbols, hparams.symbols_embedding_dim)
std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.embedding.weight.data.uniform_(-val, val)
# initialize observed attribute embedding
self.obs_embedding = None
if hparams.obs_dim > 0:
self.obs_embedding = nn.Embedding(
hparams.obs_n_class, hparams.obs_dim)
std = sqrt(2.0 / (hparams.obs_n_class + hparams.obs_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.obs_embedding.weight.data.uniform_(-val, val)
self.encoder = Encoder(hparams)
self.decoder = Decoder(hparams)
self.postnet = Postnet(hparams)
self.lat_encoder = None
if hparams.lat_dim > 0:
self.lat_encoder = AudioEncoder(hparams)
def parse_batch(self, batch):
(text_padded, input_lengths, obs_labels,
mel_padded, gate_padded, output_lengths) = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
obs_labels = to_gpu(obs_labels).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
return (
(text_padded, input_lengths, obs_labels,
mel_padded, max_len, output_lengths),
(mel_padded, gate_padded))
def parse_output(self, outputs, output_lengths=None):
if self.mask_padding and output_lengths is not None:
mask = ~get_mask_from_lengths(output_lengths)
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].data.masked_fill_(mask, 0.0)
outputs[1].data.masked_fill_(mask, 0.0)
outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies
return outputs
def forward(self, inputs):
(text_inputs, text_lengths, obs_labels,
mels, max_len, output_lengths) = inputs
text_lengths, output_lengths = text_lengths.data, output_lengths.data
embedded_inputs = self.embedding(text_inputs).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, text_lengths)
obs = None
if self.obs_embedding is not None:
obs = self.obs_embedding(obs_labels)
lat, lat_mu, lat_logvar = None, None, None
if self.lat_encoder is not None:
(lat, lat_mu, lat_logvar) = self.lat_encoder(mels, output_lengths)
obs_and_lat = [x for x in [obs, lat] if x is not None]
if bool(obs_and_lat):
obs_and_lat = torch.cat(obs_and_lat, dim=-1)
else:
obs_and_lat = None
mel_outputs, gate_outputs, alignments = self.decoder(
encoder_outputs, obs_and_lat, mels, memory_lengths=text_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments,
lat_mu, lat_logvar],
output_lengths)
def inference(self, inputs, obs_labels=None, lat=None, ret_has_eos=False):
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder.inference(embedded_inputs)
if obs_labels is None:
obs_labels = torch.LongTensor(len(inputs))
obs_labels = obs_labels.to(inputs.device).zero_()
obs = None
if self.obs_embedding is not None:
obs = self.obs_embedding(obs_labels)
if self.lat_encoder is not None:
if lat is None:
lat = torch.FloatTensor(len(inputs), self.lat_encoder.lat_dim)
lat = lat.to(inputs.device).zero_().type(encoder_outputs.type())
obs_and_lat = [x for x in [obs, lat] if x is not None]
if bool(obs_and_lat):
obs_and_lat = torch.cat(obs_and_lat, dim=-1)
else:
obs_and_lat = None
mel_outputs, gate_outputs, alignments, has_eos = self.decoder.inference(
encoder_outputs, obs_and_lat, ret_has_eos=True)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
outputs = self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
if ret_has_eos:
return outputs + [has_eos]
else:
return outputs
| 25,989 | 37.791045 | 82 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/layers.py
|
import torch
from librosa.filters import mel as librosa_mel_fn
from .audio_processing import dynamic_range_compression
from .audio_processing import dynamic_range_decompression
from .stft import STFT
from .utils import get_mask_from_lengths
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class GlobalAvgPool(torch.nn.Module):
def __init__(self):
super(GlobalAvgPool, self).__init__()
def forward(self, x, lengths=None):
"""Average pooling across time steps (dim=1) with optionally lengths.
Args:
x: torch.Tensor of shape (N, T, ...)
lengths: None or torch.Tensor of shape (N,)
dim: dimension to pool
"""
if lengths is None:
return x.mean(dim=1, keepdim=False)
else:
mask = get_mask_from_lengths(lengths).type(x.type()).to(x.device)
mask_shape = list(mask.size()) + [1 for _ in range(x.ndimension()-2)]
mask = mask.reshape(*mask_shape)
numer = (x * mask).sum(dim=1, keepdim=False)
denom = mask.sum(dim=1, keepdim=False)
return numer / denom
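# Hedged usage sketch (added for illustration): with lengths, only the valid
# time steps contribute to the mean. Note get_mask_from_lengths allocates a
# torch.cuda tensor, so the masked path assumes CUDA inputs.
#
#   pool = GlobalAvgPool()
#   x = torch.randn(2, 5, 8)                       # (batch, time, feat)
#   pool(x)                                        # plain mean over time: (2, 8)
#   pool(x.cuda(), torch.tensor([5, 3]).cuda())    # masks the padded steps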
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
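# Hedged usage sketch (added for illustration; inputs must already be in [-1, 1]):
#
#   taco_stft = TacotronSTFT(filter_length=1024, hop_length=256,
#                            win_length=1024, n_mel_channels=80,
#                            sampling_rate=22050)
#   wav = torch.clamp(torch.randn(1, 22050), -1, 1)  # one second of audio
#   mel = taco_stft.mel_spectrogram(wav)             # (1, 80, ~87 frames)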
| 3,859 | 36.115385 | 81 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py
|
# import sys
# sys.path.append('tacotron2')
import torch
from .layers import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with waveglow """
def __init__(self, waveglow, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).cuda()
if mode == 'zeros':
mel_input = torch.zeros(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
elif mode == 'normal':
mel_input = torch.randn(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
else:
            raise Exception("Mode {} is not supported".format(mode))
with torch.no_grad():
bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
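# Hedged usage sketch (added for illustration; waveglow and mel are
# placeholders, and CUDA is assumed since both Denoiser and WaveGlow allocate
# GPU tensors):
#
#   denoiser = Denoiser(waveglow, filter_length=1024, n_overlap=4)
#   audio = waveglow.infer(mel, sigma=0.666)
#   audio_dn = denoiser(audio, strength=0.01)  # subtracts the bias spectrum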
| 1,610 | 38.292683 | 77 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/audio_processing.py
|
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
    observations in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
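# Hedged usage sketch (added for illustration; stft is an STFT instance from
# the sibling stft.py module, audio a (batch, samples) tensor):
#
#   mag, _ = stft.transform(audio)              # discard the true phase
#   recon = griffin_lim(mag, stft, n_iters=30)  # iteratively re-estimate phase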
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
| 2,610 | 26.776596 | 83 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/text.py
|
""" from https://github.com/keithito/tacotron """
import numpy as np
import re
from . import cleaners
from .symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Special symbols
SOS_TOK = '<s>'
EOS_TOK = '</s>'
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
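# Hedged example (added for illustration): curly-brace spans are treated as
# ARPAbet and looked up via the '@'-prefixed symbols.
#
#   text_to_sequence('Turn left on {HH AW1 S S T AH0 N} Street.',
#                    ['english_cleaners'])
#   # cleans the plain-text parts, then maps '@HH', '@AW1', ... to IDs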
def sample_code_chunk(code, size):
assert(size > 0 and size <= len(code))
start = np.random.randint(len(code) - size + 1)
end = start + size
return code[start:end], start, end
def code_to_sequence(code, code_dict, collapse_code):
if collapse_code:
prev_c = None
sequence = []
for c in code:
if c in code_dict and c != prev_c:
sequence.append(code_dict[c])
prev_c = c
else:
sequence = [code_dict[c] for c in code if c in code_dict]
if len(sequence) < 0.95 * len(code):
        print('WARNING: over 5% of codes are OOV')
return sequence
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def sequence_to_code(sequence, code_dict):
'''Analogous to sequence_to_text'''
id_to_code = {i: c for c, i in code_dict.items()}
return ' '.join([id_to_code[i] for i in sequence])
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
| 3,075 | 27.481481 | 93 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/cmudict.py
|
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
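# Hedged usage sketch (the dictionary path is a placeholder):
#
#   cmu = CMUDict('cmudict-0.7b')
#   cmu.lookup('hello')   # e.g. ['HH AH0 L OW1', ...]; None if the word is absent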
| 1,979 | 29 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/numbers.py
|
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
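# Hedged demo (added; not part of the original module). The exact wording depends
# on the installed `inflect` version, so outputs are printed rather than asserted.
if __name__ == '__main__':
    for sample in ['$3.50', 'in 1906', '10,000 items', '3rd place']:
        print(sample, '->', normalize_numbers(sample))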
| 2,167 | 29.111111 | 93 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/symbols.py
|
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify
_characters. See TRAINING_DATA.md for details.
'''
from . import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
| 718 | 36.842105 | 195 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/unit2speech/tacotron2/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/metrics/asr_metrics/ppx.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import warnings
def get_target_sequences(manifest, ground_truth, to_take=1000):
import json
import pathlib
with open(ground_truth, 'r') as fin:
original_continuations = json.loads(fin.read())
sequence2length = [(k, v[0]) for k, v in original_continuations.items()]
assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds
sequence2length.sort(key=lambda x: x[1])
to_take_sequences = set(v[0] for v in sequence2length[:to_take])
to_take_ids = []
with open(manifest, 'r') as f:
f.readline()
for i, line in enumerate(f.readlines()):
seq_id = line.split()[0]
seq_id = pathlib.Path(seq_id).name.split('__')[0]
if seq_id in to_take_sequences:
to_take_ids.append(i)
print(f'Took {len(to_take_ids)} ids')
return set(to_take_ids)
def get_args():
import argparse
parser = argparse.ArgumentParser("Evaluate PPX metric of a transcript.")
parser.add_argument('--asr-transcript', type=str,
help='Path to the transcript file.')
    parser.add_argument('--cut-id', action='store_true',
                        help='Whether to cut the first token (typically a seq id)')
    parser.add_argument('--cut-tail', action='store_true',
                        help='Whether to cut the last token (typically a speaker id)')
parser.add_argument('--manifest', type=str, default=None)
parser.add_argument('--prompts-description', type=str, default=None)
args = parser.parse_args()
return args
def main():
args = get_args()
lm = torch.hub.load(
'pytorch/fairseq', 'transformer_lm.wmt19.en', tokenizer='moses', bpe='fastbpe')
lm.eval().cuda() # disable dropout
if args.manifest is None and args.prompts_description is None:
target_ids = None
else:
target_ids = get_target_sequences(
args.manifest, args.prompts_description)
with open(args.asr_transcript, 'r') as fin:
lines = fin.readlines()
if target_ids is not None:
filtered = []
for line in lines:
line_id = line.split()[-1]
line_id = int(line_id.split('-')[1][:-1])
if line_id in target_ids:
filtered.append(line)
lines = filtered
if args.cut_id:
lines = [' '.join(x.split()[1:]) for x in lines]
if args.cut_tail:
lines = [' '.join(x.split()[:-1]) for x in lines]
lines = [x.strip().lower() for x in lines]
    def get_logprob(sent):
        return lm.score(sent)['positional_scores'].mean().neg().item()
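    # Added note: lm.score returns per-token log-probabilities, so get_logprob
    # is the sentence's mean negative log-likelihood, and exp() of it (below)
    # is the per-token perplexity.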
logprobs = [get_logprob(l) for l in lines]
filtered = [x for x in logprobs if not np.isnan(x)]
if len(filtered) != len(logprobs):
warnings.warn("NaNs detected!")
logprobs = filtered
perplexities = [np.exp(l) for l in logprobs]
for name, stats in [('logprob', logprobs), ('perplexity', perplexities)]:
mean = np.mean(stats)
sem = np.std(stats) / np.sqrt(len(stats))
median = np.median(stats)
interval = list(np.percentile(stats, [10, 90]))
mean, sem, median, percentile10, percentile90 = [
round(x, 2) for x in [mean, sem, median] + interval]
print(name)
print(f"\tMean {mean} +- {sem}")
print(
f"\tMedian {median}, 90% confidence interval {percentile10}...{percentile90}")
if __name__ == '__main__':
main()
| 3,692 | 29.02439 | 90 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import nltk
from misc.bleu_utils import sentence_bleu
import warnings
def get_target_sequences(manifest, ground_truth, to_take=1000):
import json
import pathlib
with open(ground_truth, 'r') as fin:
original_continuations = json.loads(fin.read())
sequence2length = [(k, v[0]) for k, v in original_continuations.items()]
assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds
sequence2length.sort(key=lambda x: x[1])
to_take_sequences = set(v[0] for v in sequence2length[:to_take])
to_take_ids = []
with open(manifest, 'r') as f:
f.readline()
for i, line in enumerate(f.readlines()):
seq_id = line.split()[0]
seq_id = pathlib.Path(seq_id).name.split('__')[0]
if seq_id in to_take_sequences:
to_take_ids.append(i)
print(f'Took {len(to_take_ids)} ids')
return set(to_take_ids)
def get_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--asr-transcript', type=str,
help='Path to the transcript file.')
parser.add_argument('--manifest', required=True)
parser.add_argument('--prompts-description', required=True)
    parser.add_argument('--cut-id', action='store_true',
                        help='Whether to cut the first token (typically a seq id)')
    parser.add_argument('--cut-tail', action='store_true',
                        help='Whether to cut the last token (typically a speaker id)')
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
return args
def get_self_bleu(utterances, averaging_mode, weights):
self_bleu = []
for i in range(len(utterances)):
hypo = utterances[i]
rest = utterances[:i] + utterances[i+1:]
self_bleu.append(sentence_bleu(rest, hypo, weights,
no_length_penalty=True, averaging_mode=averaging_mode))
return self_bleu
def get_self_bleu2_arithmetic(utterances):
weights = (0.5, 0.5) # equal weight for unigrams and bigrams
return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights)
def get_self_bleu2_geometric(utterances):
weights = (0.5, 0.5)
return get_self_bleu(utterances, averaging_mode='geometric', weights=weights)
def get_auto_bleu2_arithmetic(utterances):
weights = (0.5, 0.5)
return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances]
def get_auto_bleu2_geometric(utterances):
weights = (0.5, 0.5)
return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances]
def get_auto_bleu3_geometric(utterances):
weights = (1./3, 1./3, 1./3)
return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances]
def get_auto_bleu3_arithmetic(utterances):
weights = (1./3, 1./3, 1./3)
return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances]
def get_self_bleu3_arithmetic(utterances):
weights = (1./3, 1./3, 1./3)
return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights)
def get_self_bleu3_geometric(utterances):
weights = (1./3, 1./3, 1./3)
return get_self_bleu(utterances, averaging_mode='geometric', weights=weights)
def auto_bleu(sentence, weights, mean_mode='arithmetic'):
if len(sentence) <= 1:
return 0
N = len(weights)
bleu_n = np.zeros([N])
for n in range(N):
targ_ngrams = list(nltk.ngrams(sentence, n+1))
for p in range(len(targ_ngrams)):
left = sentence[:p]
right = sentence[(p+n+1):]
rest_ngrams = list(nltk.ngrams(left, n+1)) + \
list(nltk.ngrams(right, n+1))
            # count whether this n-gram also occurs elsewhere in the sentence
bleu_n[n] += targ_ngrams[p] in rest_ngrams
bleu_n[n] /= len(targ_ngrams) # average them to get a proportion
weights = np.array(weights)
if mean_mode == 'arithmetic':
return (bleu_n * weights).sum()
elif mean_mode == 'geometric':
return (bleu_n ** weights).prod()
else:
        raise ValueError(f'Unknown aggregation mode {mean_mode}')
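# Added worked example (hedged; not in the original): with
# sentence = ['a', 'b', 'a', 'b'] and weights = (1.0,), every unigram re-occurs
# elsewhere in the sentence, so bleu_n = [1.0] and auto_bleu returns 1.0 under
# either mean mode.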
def main():
from multiprocessing import Pool
args = get_args()
target_ids = get_target_sequences(args.manifest, args.prompts_description)
with open(args.asr_transcript, 'r') as fin:
lines = fin.readlines()
terms = [x.strip().split() for x in lines]
filtered = []
for term in terms:
line_id = int(term[-1].split('-')[1][:-1])
if line_id in target_ids:
filtered.append(term)
terms = filtered
if args.cut_id:
terms = [x[1:] for x in terms]
if args.cut_tail:
terms = [x[:-1] for x in terms]
if args.debug:
terms = terms[:10]
tasks = [
('Self-BLEU2-arithmetic', get_self_bleu2_arithmetic),
('Self-BLEU2-geometric', get_self_bleu2_geometric),
('Auto-BLEU2-arithmetic', get_auto_bleu2_arithmetic),
('Auto-BLEU2-geometric', get_auto_bleu2_geometric),
('Self-BLEU3-arithmetic', get_self_bleu3_arithmetic),
('Self-BLEU3-geometric', get_self_bleu3_geometric),
('Auto-BLEU3-arithmetic', get_auto_bleu3_arithmetic),
('Auto-BLEU3-geometric', get_auto_bleu3_geometric),
]
n_processes = min(16, len(tasks))
with Pool(n_processes) as pool:
metrics = pool.map(run_f, [(t[1], terms) for t in tasks])
for (metric_name, _), metric in zip(tasks, metrics):
metric, sem = np.mean(metric), np.std(metric) / np.sqrt(len(metric))
metric, sem = [
round(100 * x, 2) for x in [metric, sem]
]
print(f'{metric_name} {metric} +- {sem}')
def run_f(task_params):
f, terms = task_params
return f(terms)
if __name__ == '__main__':
# NLTK produces warnings
warnings.filterwarnings("ignore")
main()
| 6,101 | 29.207921 | 86 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import numpy as np
from misc.bleu_utils import sentence_bleu
import json
import warnings
def get_args():
import argparse
parser = argparse.ArgumentParser("Tool to calculate Continuation-BLEU2")
parser.add_argument('--asr-transcript', type=str,
help='Path to the transcript file.')
parser.add_argument('--prompts-description', type=str,
help='Path to the ground-truth continuation')
parser.add_argument('--manifest', type=str, required=True)
parser.add_argument('--take-shortest', type=int, default=1000)
args = parser.parse_args()
return args
def main():
# NLTK produces warnings
warnings.filterwarnings("ignore")
args = get_args()
with open(args.prompts_description, 'r') as fin:
original_continuations = json.loads(fin.read())
sequence2length = [(k, v[0]) for k, v in original_continuations.items()]
assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds
sequence2length.sort(key=lambda x: x[1])
to_take = set(v[0] for v in sequence2length[:args.take_shortest])
with open(args.manifest, 'r') as fin:
fin.readline()
linenum2file = dict([
(i, l.split("__")[0]) for (i, l) in enumerate(fin)
])
max_files = max(linenum2file.keys())
continuations = defaultdict(list)
mean_length_after = 0
n_examples = 0
with open(args.asr_transcript, 'r') as fin:
for line in fin:
n_examples += 1
line = line.split()
sequence_id = int(line[-1].split('-')[1][:-1])
assert sequence_id <= max_files
sequence_name = linenum2file[sequence_id]
continuations[sequence_name].append(line[:-1])
mean_length_after += len(line)
mean_length_after /= n_examples
print(f'Mean length of continuations, in words: {mean_length_after}')
metric_values = []
mean_ground_truth_words = 0
n_examples = 0
n_candidates = 0
for k, candidates in continuations.items():
if k not in to_take:
continue
n_examples += 1
ground_truth = original_continuations[k][1].split()
n_candidates += len(candidates)
bleu = sentence_bleu(candidates, ground_truth, weights=(
0.5, 0.5), no_length_penalty=True, averaging_mode="geometric")
mean_ground_truth_words += len(ground_truth)
metric_values.append(bleu)
n = len(metric_values)
print(
f'Median BLEU over {n} examples: {np.median(metric_values)} +- {np.std(metric_values) / np.sqrt(n)}')
if __name__ == '__main__':
main()
| 2,869 | 27.7 | 109 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/metrics/asr_metrics/misc/bleu_utils.py
|
"""
TODO: the code is take from Apache-2 Licensed NLTK: make sure we do this properly!
Copied over from nltk.tranlate.bleu_score. This code has two major changes:
- allows to turn off length/brevity penalty --- it has no sense for self-bleu,
- allows to use arithmetic instead of geometric mean
"""
import math
import sys
from fractions import Fraction
import warnings
from collections import Counter
from nltk.translate.bleu_score import modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction
def corpus_bleu(
list_of_references,
hypotheses,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
averaging_mode="geometric",
no_length_penalty=False
):
"""
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
    Instead of averaging the sentence-level BLEU scores (i.e. macro-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
    for each hypothesis-reference(s) pair before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
    The example below shows that corpus_bleu() is different from averaging
    sentence_bleu() over hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
assert len(list_of_references) == len(hypotheses), (
"The number of hypotheses and their reference(s) should be the " "same "
)
# Iterate through each hypothesis and their corresponding references.
for references, hypothesis in zip(list_of_references, hypotheses):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(references, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(references, hyp_len)
# Calculate corpus-level brevity penalty.
if no_length_penalty and averaging_mode == 'geometric':
bp = 1.0
elif no_length_penalty and averaging_mode == 'arithmetic':
bp = 0.0
else:
assert not no_length_penalty
        assert averaging_mode != 'arithmetic', 'Not sure how to apply length penalty with arithmetic mode'
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweigh:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various precision values for the different ngram orders.
p_n = [
Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
    # If there's no smoothing, use method0 from the SmoothingFunction class.
if not smoothing_function:
smoothing_function = SmoothingFunction().method0
    # Smooth the modified precision.
# Note: smoothing_function() may convert values into floats;
# it tries to retain the Fraction object as much as the
# smoothing method allows.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
if averaging_mode == "geometric":
s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
elif averaging_mode == "arithmetic":
s = (w_i * p_i for w_i, p_i in zip(weights, p_n))
s = math.fsum(s)
return s
def sentence_bleu(
references,
hypothesis,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
averaging_mode="geometric",
no_length_penalty=False
):
return corpus_bleu(
[references], [hypothesis], weights, smoothing_function, auto_reweigh, averaging_mode, no_length_penalty
)
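# Hedged usage sketch (added; not part of the original module). Mirrors how
# self_auto_bleu.py calls sentence_bleu for Self-BLEU, with the brevity
# penalty disabled.
if __name__ == "__main__":
    peers = [['the', 'cat', 'sat'], ['a', 'cat', 'sat', 'down']]
    hypo = ['the', 'cat', 'sat', 'down']
    print(sentence_bleu(peers, hypo, weights=(0.5, 0.5),
                        no_length_penalty=True, averaging_mode="geometric"))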
| 6,679 | 39.240964 | 112 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/metrics/asr_metrics/misc/cut_as.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torchaudio
import argparse
import json
import pathlib
def get_args():
parser = argparse.ArgumentParser(
"Assuring generated audio have the same length as ground-truth audio")
parser.add_argument('--samples_dir', required=True, type=str)
parser.add_argument('--out_dir', required=True, type=str)
parser.add_argument('--prompts_description', required=True, type=str)
return parser.parse_args()
def cut(src, tgt, l):
    x, sr = torchaudio.load(str(src))
    assert sr == 16_000
    x = x.squeeze()
    target_frames = int(l * sr)
    is_long_enough = target_frames <= x.size(0)
    if is_long_enough:
        x = x[:target_frames]
    torchaudio.save(str(tgt), x.unsqueeze(0), sr)
    return is_long_enough
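# Added note: at the asserted 16 kHz sample rate, a prompt length of l seconds
# maps to int(l * 16000) frames, e.g. 6.0 s keeps at most the first 96000 samples.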
def main():
args = get_args()
tgt_dir = pathlib.Path(args.out_dir)
tgt_dir.mkdir(exist_ok=True, parents=True)
total_files, sufficiently_long = 0, 0
with open(args.prompts_description, 'r') as f:
description = json.loads(f.read())
for src_f in pathlib.Path(args.samples_dir).glob('*.wav'):
name_prompt = src_f.with_suffix('').name.split('__')[0]
assert name_prompt in description, f'Cannot find {name_prompt}!'
target_length = description[name_prompt][0]
tgt_f = tgt_dir / (src_f.name)
is_long_enough = cut(src_f, tgt_f, target_length)
sufficiently_long += is_long_enough
if not is_long_enough:
print(f'{src_f} is not long enough')
total_files += 1
print(
f'Total files: {total_files}; sufficiently long: {sufficiently_long}')
if __name__ == '__main__':
main()
| 1,832 | 25.185714 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/metrics/abx_metrics/dump_abx_feats.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import joblib
import numpy as np
from examples.textless_nlp.gslm.speech2unit.clustering.utils import get_audio_files
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_features
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(
description="Quantize using K-means clustering over acoustic features."
)
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
required=True,
help="Acoustic feature type",
)
parser.add_argument(
"--kmeans_model_path",
type=str,
required=True,
help="K-means model file path to use for inference",
)
parser.add_argument(
"--manifest_path",
type=str,
default=None,
help="Manifest file containing the root dir and file names",
)
parser.add_argument(
"--checkpoint_path",
type=str,
help="Pretrained model checkpoint",
)
parser.add_argument(
"--layer",
type=int,
help="The layer of the pretrained model to extract features from",
default=-1,
)
parser.add_argument(
"--out_dir_path",
required=True,
type=str,
help="File path of quantized output.",
)
parser.add_argument(
"--extension", type=str, default=".flac", help="Features file path"
)
return parser
def one_hot(feat, n_clusters):
return np.eye(n_clusters)[feat]
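# Added note: np.eye(n)[ids] maps integer cluster ids to one-hot rows, e.g.
# one_hot(np.array([0, 2]), 3) -> [[1., 0., 0.], [0., 0., 1.]].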
def main(args, logger):
# Feature extraction
logger.info(f"Extracting {args.feature_type} acoustic features...")
features_batch = get_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=1.0,
flatten=False,
)
logger.info(f"Features extracted for {len(features_batch)} utterances.\n")
logger.info(f"Dimensionality of representation = {features_batch[0].shape[1]}")
logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
kmeans_model.verbose = False
_, fnames, _ = get_audio_files(args.manifest_path)
os.makedirs(args.out_dir_path, exist_ok=True)
logger.info(f"Writing quantized features to {args.out_dir_path}")
for i, feats in enumerate(features_batch):
pred = kmeans_model.predict(feats)
emb = one_hot(pred, kmeans_model.n_clusters)
        # str.rstrip strips a character set, not a suffix; remove the extension safely.
        base_fname = os.path.basename(fnames[i])
        if base_fname.endswith(args.extension):
            base_fname = base_fname[: -len(args.extension)]
output_path = os.path.join(args.out_dir_path, f"{base_fname}.npy")
with open(output_path, "wb") as f:
np.save(f, emb)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| 3,329 | 29.833333 | 83 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/clustering/quantize_with_kmeans.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import numpy as np
import joblib
from examples.textless_nlp.gslm.speech2unit.clustering.utils import (
get_audio_files,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
get_features,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(
description="Quantize using K-means clustering over acoustic features."
)
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
required=True,
help="Acoustic feature type",
)
parser.add_argument(
"--acoustic_model_path",
type=str,
help="Pretrained acoustic model checkpoint"
)
parser.add_argument(
"--layer",
type=int,
help="The layer of the pretrained model to extract features from",
default=-1,
)
parser.add_argument(
"--kmeans_model_path",
type=str,
required=True,
help="K-means model file path to use for inference",
)
parser.add_argument(
"--features_path",
type=str,
default=None,
help="Features file path. You don't need to enter acoustic model details if you have dumped features",
)
parser.add_argument(
"--manifest_path",
type=str,
default=None,
help="Manifest file containing the root dir and file names",
)
parser.add_argument(
"--out_quantized_file_path",
required=True,
type=str,
help="File path of quantized output.",
)
parser.add_argument(
"--extension", type=str, default=".flac", help="Features file path"
)
return parser
def main(args, logger):
# Feature extraction
if args.features_path is not None:
logger.info(f"Loading acoustic features from {args.features_path}...")
features_batch = np.load(args.features_path)
else:
logger.info(f"Extracting {args.feature_type} acoustic features...")
features_batch = get_features(
feature_type=args.feature_type,
checkpoint_path=args.acoustic_model_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=1.0,
flatten=False,
)
logger.info(
f"Features extracted for {len(features_batch)} utterances.\n"
)
logger.info(
f"Dimensionality of representation = {features_batch[0].shape[1]}"
)
# K-means model
logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
kmeans_model.verbose = False
_, fnames, _ = get_audio_files(args.manifest_path)
os.makedirs(os.path.dirname(args.out_quantized_file_path), exist_ok=True)
print(f"Writing quantized predictions to {args.out_quantized_file_path}")
with open(args.out_quantized_file_path, "w") as fout:
for i, feats in enumerate(features_batch):
pred = kmeans_model.predict(feats)
pred_str = " ".join(str(p) for p in pred)
            # str.rstrip strips a character set, not a suffix; remove the extension safely.
            base_fname = os.path.basename(fnames[i])
            if base_fname.endswith(args.extension):
                base_fname = base_fname[: -len(args.extension)]
fout.write(f"{base_fname}|{pred_str}\n")
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| 3,821 | 29.333333 | 110 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import time
import numpy as np
from sklearn.cluster import MiniBatchKMeans
import joblib
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
get_and_dump_features,
get_features,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(
description="Learn K-means clustering over acoustic features."
)
# Features arguments
parser.add_argument(
"--in_features_path", type=str, default=None, help="Features file path"
)
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
help="Acoustic feature type",
)
parser.add_argument(
"--manifest_path",
type=str,
default=None,
help="Manifest file containing the root dir and file names",
)
parser.add_argument(
"--out_features_path",
type=str,
default=None,
help="Features file path to write to",
)
parser.add_argument(
"--checkpoint_path",
type=str,
help="Pretrained acoustic model checkpoint",
)
parser.add_argument(
"--layer",
type=int,
help="The layer of the pretrained model to extract features from",
default=-1,
)
parser.add_argument(
"--sample_pct",
type=float,
help="Percent data to use for K-means training",
default=0.1,
)
# K-means arguments
parser.add_argument(
"--num_clusters", type=int, help="Nubmer of clusters", default=50
)
parser.add_argument("--init", default="k-means++")
parser.add_argument(
"--max_iter",
type=int,
help="Maximum number of iterations for K-means training",
default=150,
)
parser.add_argument(
"--batch_size",
type=int,
help="Batch size for K-means training",
default=10000,
)
parser.add_argument("--tol", default=0.0, type=float)
parser.add_argument("--max_no_improvement", default=100, type=int)
parser.add_argument("--n_init", default=20, type=int)
parser.add_argument("--reassignment_ratio", default=0.5, type=float)
parser.add_argument(
"--out_kmeans_model_path",
type=str,
required=True,
help="Path to save K-means model",
)
# Leftovers
parser.add_argument(
"--seed",
type=int,
help="Random seed to use for K-means training",
default=1369,
)
return parser
def get_kmeans_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
random_state,
):
return MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
tol=tol,
max_no_improvement=max_no_improvement,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
random_state=random_state,
verbose=1,
compute_labels=True,
init_size=None,
)
def train_kmeans(kmeans_model, features_batch):
start_time = time.time()
kmeans_model.fit(features_batch)
    # Use true division so rounding to two decimals reports fractional minutes.
    time_taken = round((time.time() - start_time) / 60, 2)
return kmeans_model, time_taken
def main(args, logger):
# Features loading/extraction for K-means
if args.in_features_path:
# Feature loading
logger.info(f"Loading features from {args.in_features_path}...")
features_batch = np.load(args.in_features_path, allow_pickle=True)
else:
# Feature extraction
logger.info(f"Extracting {args.feature_type} acoustic features...")
features_batch = (
get_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=args.sample_pct,
flatten=True,
)
if not args.out_features_path
else get_and_dump_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=args.sample_pct,
flatten=True,
out_features_path=args.out_features_path,
)
)
if args.out_features_path:
logger.info(
f"Saved extracted features at {args.out_features_path}"
)
logger.info(f"Features shape = {features_batch.shape}\n")
# Learn and save K-means model
kmeans_model = get_kmeans_model(
n_clusters=args.num_clusters,
init=args.init,
max_iter=args.max_iter,
batch_size=args.batch_size,
tol=args.tol,
max_no_improvement=args.max_no_improvement,
n_init=args.n_init,
reassignment_ratio=args.reassignment_ratio,
random_state=args.seed,
)
logger.info("Starting k-means training...")
kmeans_model, time_taken = train_kmeans(
kmeans_model=kmeans_model, features_batch=features_batch
)
logger.info(f"...done k-means training in {time_taken} minutes")
inertia = -kmeans_model.score(features_batch) / len(features_batch)
logger.info(f"Total intertia: {round(inertia, 2)}\n")
logger.info(f"Saving k-means model to {args.out_kmeans_model_path}")
os.makedirs(os.path.dirname(args.out_kmeans_model_path), exist_ok=True)
joblib.dump(kmeans_model, open(args.out_kmeans_model_path, "wb"))
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
| 6,182 | 28.028169 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/clustering/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Tuple
def get_audio_files(manifest_path: str) -> Tuple[str, List[str], List[int]]:
fnames, sizes = [], []
with open(manifest_path, "r") as f:
root_dir = f.readline().strip()
for line in f:
items = line.strip().split("\t")
assert (
len(items) == 2
), f"File must have two columns separated by tab. Got {line}"
fnames.append(items[0])
sizes.append(int(items[1]))
return root_dir, fnames, sizes
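# Added note (hedged; paths below are hypothetical): the expected manifest layout
# is a root directory on the first line followed by one
# "<relative-path>\t<num-samples>" entry per file, e.g.:
#   /data/LJSpeech/wavs
#   LJ001-0001.wav\t212893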
| 701 | 32.428571 | 76 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/clustering/dump_feats.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
get_and_dump_features,
)
def get_parser():
parser = argparse.ArgumentParser(
description="Compute and dump log mel fbank features."
)
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
help="Acoustic feature type",
)
parser.add_argument(
"--manifest_path",
type=str,
default=None,
help="Manifest file containing the root dir and file names",
)
parser.add_argument(
"--out_features_path",
type=str,
default=None,
help="Features file path to write to",
)
parser.add_argument(
"--checkpoint_path",
type=str,
help="Pretrained acoustic model checkpoint",
)
parser.add_argument(
"--layer",
type=int,
help="The layer of the pretrained model to extract features from",
default=-1,
)
parser.add_argument(
"--sample_pct",
type=float,
help="Percent data to use for K-means training",
default=0.1,
)
return parser
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
if __name__ == "__main__":
"""
Example command:
python ~/speechbot/clustering/dump_logmelfank_feats.py \
--manifest_path /checkpoint/kushall/data/LJSpeech-1.1/asr_input_wavs_16k/train.tsv
--out_features_path /checkpoint/kushall/experiments/speechbot/logmelfbank/features/ljspeech/train.npy
"""
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
logger.info(f"Extracting {args.feature_type} acoustic features...")
get_and_dump_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=args.sample_pct,
flatten=True,
out_features_path=args.out_features_path,
)
logger.info(f"Saved extracted features at {args.out_features_path}")
| 2,615 | 27.434783 | 109 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/hubert_feature_reader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import fairseq
import soundfile as sf
import torch.nn.functional as F
class HubertFeatureReader:
"""
Wrapper class to run inference on HuBERT model.
Helps extract features for a given audio file.
"""
def __init__(self, checkpoint_path, layer, max_chunk=1600000):
(
model,
cfg,
task,
) = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path]
)
self.model = model[0].eval().cuda()
self.task = task
self.layer = layer
self.max_chunk = max_chunk
def read_audio(self, path, ref_len=None):
wav, sr = sf.read(path)
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
assert sr == self.task.cfg.sample_rate, sr
if ref_len is not None and abs(ref_len - len(wav)) > 160:
print(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, file_path, ref_len=None):
x = self.read_audio(file_path, ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float().cuda()
if self.task.cfg.normalize:
x = F.layer_norm(x, x.shape)
x = x.view(1, -1)
feat = []
for start in range(0, x.size(1), self.max_chunk):
x_chunk = x[:, start: start + self.max_chunk]
feat_chunk, _ = self.model.extract_features(
source=x_chunk,
padding_mask=None,
mask=False,
output_layer=self.layer,
)
feat.append(feat_chunk)
return torch.cat(feat, 1).squeeze(0)
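# Hedged usage sketch (added; not in the original file). The checkpoint name and
# layer choice below are assumptions, and a CUDA device is required:
#
#   reader = HubertFeatureReader('hubert_base_ls960.pt', layer=6)
#   feats = reader.get_feats('utt1.flac')   # (num_frames, feature_dim) tensor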
| 1,912 | 30.883333 | 66 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/w2v2_feature_reader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import fairseq
import soundfile as sf
class Wav2VecFeatureReader:
"""
Wrapper class to run inference on Wav2Vec 2.0 model.
Helps extract features for a given audio file.
"""
def __init__(self, checkpoint_path, layer):
state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(
checkpoint_path
)
w2v_args = state["args"]
self.task = fairseq.tasks.setup_task(w2v_args)
model = self.task.build_model(w2v_args)
model.load_state_dict(state["model"], strict=True)
model.eval()
model.cuda()
self.model = model
self.layer = layer
def read_audio(self, fname):
wav, sr = sf.read(fname)
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
assert sr == self.task.cfg.sample_rate, sr
return wav
def get_feats(self, file_path):
x = self.read_audio(file_path)
with torch.no_grad():
source = torch.from_numpy(x).view(1, -1).float().cuda()
res = self.model(
source=source, mask=False, features_only=True, layer=self.layer
)
return res["layer_results"][self.layer][0].squeeze(1)
| 1,424 | 29.319149 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gc
import os
import random
import shutil
import numpy as np
import torch
import tqdm
from examples.textless_nlp.gslm.speech2unit.pretrained.cpc_feature_reader import (
CpcFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.hubert_feature_reader import (
HubertFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.logmel_feature_reader import (
LogMelFeatureReader,
)
from examples.textless_nlp.gslm.speech2unit.pretrained.w2v2_feature_reader import (
Wav2VecFeatureReader,
)
def get_feature_reader(feature_type):
if feature_type == "logmel":
return LogMelFeatureReader
elif feature_type == "hubert":
return HubertFeatureReader
elif feature_type == "w2v2":
return Wav2VecFeatureReader
elif feature_type == "cpc":
return CpcFeatureReader
else:
raise NotImplementedError(f"{feature_type} is not supported.")
def get_feature_iterator(
feature_type, checkpoint_path, layer, manifest_path, sample_pct
):
feature_reader_cls = get_feature_reader(feature_type)
with open(manifest_path, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
file_path_list = [
os.path.join(root, line.split("\t")[0])
for line in lines
if len(line) > 0
]
if sample_pct < 1.0:
file_path_list = random.sample(
file_path_list, int(sample_pct * len(file_path_list))
)
num_files = len(file_path_list)
reader = feature_reader_cls(
checkpoint_path=checkpoint_path, layer=layer
)
def iterate():
for file_path in file_path_list:
feats = reader.get_feats(file_path)
yield feats.cpu().numpy()
return iterate, num_files
def get_features(
feature_type, checkpoint_path, layer, manifest_path, sample_pct, flatten
):
generator, num_files = get_feature_iterator(
feature_type=feature_type,
checkpoint_path=checkpoint_path,
layer=layer,
manifest_path=manifest_path,
sample_pct=sample_pct,
)
iterator = generator()
features_list = []
for features in tqdm.tqdm(iterator, total=num_files):
features_list.append(features)
# Explicit clean up
del iterator
del generator
gc.collect()
torch.cuda.empty_cache()
if flatten:
return np.concatenate(features_list)
return features_list
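# Added note: with flatten=True the per-utterance (frames, dim) matrices are
# concatenated into one array for K-means training; with flatten=False the
# per-utterance list is returned, since quantization needs file boundaries.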
def get_and_dump_features(
feature_type,
checkpoint_path,
layer,
manifest_path,
sample_pct,
flatten,
out_features_path,
):
# Feature extraction
features_batch = get_features(
feature_type=feature_type,
checkpoint_path=checkpoint_path,
layer=layer,
manifest_path=manifest_path,
sample_pct=sample_pct,
flatten=flatten,
)
# Save features
out_dir_path = os.path.dirname(out_features_path)
os.makedirs(out_dir_path, exist_ok=True)
shutil.copyfile(
manifest_path,
os.path.join(out_dir_path, os.path.basename(manifest_path)),
)
np.save(out_features_path, features_batch)
return features_batch
| 3,407 | 25.834646 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/logmel_feature_reader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import soundfile as sf
import torch
import torchaudio.compliance.kaldi as kaldi
class LogMelFeatureReader:
"""
    Wrapper class to compute log-Mel filterbank features.
Helps extract features for a given audio file.
"""
def __init__(self, *args, **kwargs):
self.num_mel_bins = kwargs.get("num_mel_bins", 80)
self.frame_length = kwargs.get("frame_length", 25.0)
def get_feats(self, file_path):
wav, sr = sf.read(file_path)
feats = torch.from_numpy(wav).float()
feats = kaldi.fbank(
feats.unsqueeze(0),
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
sample_frequency=sr,
)
return feats
| 905 | 28.225806 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py
|
import soundfile as sf
import torch
import torch.nn as nn
import torch.nn.functional as F
class CpcFeatureReader:
"""
Wrapper class to run inference on CPC model.
Helps extract features for a given audio file.
"""
def __init__(
self,
checkpoint_path,
layer,
use_encoder_layer=False,
norm_features=False,
sample_rate=16000,
max_chunk=64000,
):
self.model = load_cpc_model(checkpoint_path, layer).eval().cuda()
self.sample_rate = sample_rate
self.max_chunk = max_chunk
self.norm_features = norm_features
self.use_encoder_layer = use_encoder_layer
def read_audio(self, path, ref_len=None):
wav, sr = sf.read(path)
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
assert sr == self.sample_rate, sr
if ref_len is not None and abs(ref_len - len(wav)) > 160:
print(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
def get_feats(self, file_path, ref_len=None):
x = self.read_audio(file_path, ref_len)
# Inspired from CPC_audio feature_loader.py
with torch.no_grad():
x = torch.from_numpy(x).float().cuda()
x = x.view(1, 1, -1)
size = x.size(2)
feat = []
start = 0
while start < size:
if start + self.max_chunk > size:
break
x_chunk = x[..., start : start + self.max_chunk]
feat_chunk = self.model.extract_features(
source=x_chunk,
get_encoded=self.use_encoder_layer,
norm_output=self.norm_features,
)
feat.append(feat_chunk)
start += self.max_chunk
if start < size:
                # Slice the time dimension (dim 2) like the chunk loop above;
                # slicing dim 1 (which has size 1) would re-process the whole signal.
                x_chunk = x[..., -self.max_chunk:]
feat_chunk = self.model.extract_features(
source=x_chunk,
get_encoded=self.use_encoder_layer,
norm_output=self.norm_features,
)
df = x_chunk.size(2) // feat_chunk.size(1)
delta = (size - start) // df
feat.append(feat_chunk[:, -delta:])
return torch.cat(feat, 1).squeeze(0)
def load_cpc_model(checkpoint_path, layer=None):
state_dict = torch.load(checkpoint_path)
weights = state_dict["weights"]
config = state_dict["config"]
if layer is not None:
config["nLevelsGRU"] = layer
encoder = CPCEncoder(config["hiddenEncoder"])
ar_net = CPCAR(
config["hiddenEncoder"], config["hiddenGar"], False, config["nLevelsGRU"]
)
model = CPCModel(encoder, ar_net)
model.load_state_dict(weights, strict=False)
model.config = config
return model
class ChannelNorm(nn.Module):
def __init__(self, num_features, epsilon=1e-05, affine=True):
super(ChannelNorm, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1, num_features, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, num_features, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
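        # Added note: x is (batch, channels, time); each time step is
        # normalized across the channel dimension, x_hat = (x - mean) / sqrt(var + eps),
        # then the learned per-channel affine transform is applied when enabled.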
cum_mean = x.mean(dim=1, keepdim=True)
cum_var = x.var(dim=1, keepdim=True)
x = (x - cum_mean) * torch.rsqrt(cum_var + self.epsilon)
if self.weight is not None:
x = x * self.weight + self.bias
return x
class CPCEncoder(nn.Module):
def __init__(self, hidden_dim=512):
super(CPCEncoder, self).__init__()
self.conv0 = nn.Conv1d(1, hidden_dim, 10, stride=5, padding=3)
self.batchNorm0 = ChannelNorm(hidden_dim)
self.conv1 = nn.Conv1d(hidden_dim, hidden_dim, 8, stride=4, padding=2)
self.batchNorm1 = ChannelNorm(hidden_dim)
self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
self.batchNorm2 = ChannelNorm(hidden_dim)
self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
self.batchNorm3 = ChannelNorm(hidden_dim)
self.conv4 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
self.batchNorm4 = ChannelNorm(hidden_dim)
self.DOWNSAMPLING = 160
def get_output_dim(self):
return self.conv4.out_channels
def forward(self, x):
x = F.relu(self.batchNorm0(self.conv0(x)))
x = F.relu(self.batchNorm1(self.conv1(x)))
x = F.relu(self.batchNorm2(self.conv2(x)))
x = F.relu(self.batchNorm3(self.conv3(x)))
x = F.relu(self.batchNorm4(self.conv4(x)))
return x
class CPCAR(nn.Module):
def __init__(self, dim_encoded, dim_output, keep_hidden, num_layers):
super(CPCAR, self).__init__()
self.baseNet = nn.LSTM(
dim_encoded, dim_output, num_layers=num_layers, batch_first=True
)
self.hidden = None
self.keep_hidden = keep_hidden
def get_output_dim(self):
return self.baseNet.hidden_size
def forward(self, x):
try:
self.baseNet.flatten_parameters()
except RuntimeError:
pass
x, h = self.baseNet(x, self.hidden)
if self.keep_hidden:
if isinstance(h, tuple):
self.hidden = tuple(x.detach() for x in h)
else:
self.hidden = h.detach()
return x
class CPCModel(nn.Module):
def __init__(self, encoder, ar_net):
super(CPCModel, self).__init__()
self.gEncoder = encoder
self.gAR = ar_net
self.config = None
def forward(self, x, label):
encoded = self.gEncoder(x).permute(0, 2, 1)
cpc_feature = self.gAR(encoded)
return cpc_feature, encoded, label
def extract_features(self, source, get_encoded=False, norm_output=False):
cpc_feature, encoded, _ = self.forward(source, None)
if get_encoded:
cpc_feature = encoded
if norm_output:
mean = cpc_feature.mean(dim=1, keepdim=True)
var = cpc_feature.var(dim=1, keepdim=True)
cpc_feature = (cpc_feature - mean) / torch.sqrt(var + 1e-08)
return cpc_feature
| 6,525 | 32.813472 | 82 |
py
|
sign-topic
|
sign-topic-main/examples/paraphraser/paraphrase.py
|
#!/usr/bin/env python3 -u
import argparse
import fileinput
import logging
import os
import sys
from fairseq.models.transformer import TransformerModel
logging.getLogger().setLevel(logging.INFO)
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("--en2fr", required=True, help="path to en2fr model")
parser.add_argument(
"--fr2en", required=True, help="path to fr2en mixture of experts model"
)
parser.add_argument(
"--user-dir", help="path to fairseq examples/translation_moe/src directory"
)
parser.add_argument(
"--num-experts",
type=int,
default=10,
help="(keep at 10 unless using a different model)",
)
parser.add_argument(
"files",
nargs="*",
default=["-"],
help='input files to paraphrase; "-" for stdin',
)
args = parser.parse_args()
if args.user_dir is None:
args.user_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), # examples/
"translation_moe",
"src",
)
if os.path.exists(args.user_dir):
logging.info("found user_dir:" + args.user_dir)
else:
raise RuntimeError(
"cannot find fairseq examples/translation_moe/src "
"(tried looking here: {})".format(args.user_dir)
)
logging.info("loading en2fr model from:" + args.en2fr)
en2fr = TransformerModel.from_pretrained(
model_name_or_path=args.en2fr,
tokenizer="moses",
bpe="sentencepiece",
).eval()
logging.info("loading fr2en model from:" + args.fr2en)
fr2en = TransformerModel.from_pretrained(
model_name_or_path=args.fr2en,
tokenizer="moses",
bpe="sentencepiece",
user_dir=args.user_dir,
task="translation_moe",
).eval()
def gen_paraphrases(en):
fr = en2fr.translate(en)
return [
fr2en.translate(fr, inference_step_args={"expert": i})
for i in range(args.num_experts)
]
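    # Added note: paraphrases come from round-tripping en -> fr -> en, decoding
    # the fr2en mixture-of-experts model once per expert.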
logging.info("Type the input sentence and press return:")
for line in fileinput.input(args.files):
line = line.strip()
if len(line) == 0:
continue
for paraphrase in gen_paraphrases(line):
print(paraphrase)
if __name__ == "__main__":
main()
| 2,408 | 27.011628 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/drnmt_rerank.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Score raw text with a trained model.
"""
from collections import namedtuple
import logging
from multiprocessing import Pool
import sys
import os
import random
import numpy as np
import sacrebleu
import torch
from fairseq import checkpoint_utils, options, utils
logger = logging.getLogger("fairseq_cli.drnmt_rerank")
logger.setLevel(logging.INFO)
Batch = namedtuple("Batch", "ids src_tokens src_lengths")
pool_init_variables = {}
def init_loaded_scores(mt_scores, model_scores, hyp, ref):
global pool_init_variables
pool_init_variables["mt_scores"] = mt_scores
pool_init_variables["model_scores"] = model_scores
pool_init_variables["hyp"] = hyp
pool_init_variables["ref"] = ref
def parse_fairseq_gen(filename, task):
source = {}
hypos = {}
scores = {}
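    # Added note: expects fairseq generation output, where "S-<id>\t<text>"
    # lines carry sources and "D-<id>\t<score>\t<text>" lines carry hypotheses,
    # args.beam of them per source.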
with open(filename, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if line.startswith("S-"): # source
uid, text = line.split("\t", 1)
uid = int(uid[2:])
source[uid] = text
elif line.startswith("D-"): # hypo
uid, score, text = line.split("\t", 2)
uid = int(uid[2:])
if uid not in hypos:
hypos[uid] = []
scores[uid] = []
hypos[uid].append(text)
scores[uid].append(float(score))
else:
continue
source_out = [source[i] for i in range(len(hypos))]
hypos_out = [h for i in range(len(hypos)) for h in hypos[i]]
scores_out = [s for i in range(len(scores)) for s in scores[i]]
return source_out, hypos_out, scores_out
def read_target(filename):
with open(filename, "r", encoding="utf-8") as f:
output = [line.strip() for line in f]
return output
def make_batches(args, src, hyp, task, max_positions, encode_fn):
assert len(src) * args.beam == len(
hyp
), f"Expect {len(src) * args.beam} hypotheses for {len(src)} source sentences with beam size {args.beam}. Got {len(hyp)} hypotheses intead."
hyp_encode = [
task.source_dictionary.encode_line(encode_fn(h), add_if_not_exist=False).long()
for h in hyp
]
if task.cfg.include_src:
src_encode = [
task.source_dictionary.encode_line(
encode_fn(s), add_if_not_exist=False
).long()
for s in src
]
tokens = [(src_encode[i // args.beam], h) for i, h in enumerate(hyp_encode)]
lengths = [(t1.numel(), t2.numel()) for t1, t2 in tokens]
else:
tokens = [(h,) for h in hyp_encode]
lengths = [(h.numel(),) for h in hyp_encode]
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(tokens, lengths),
max_tokens=args.max_tokens,
max_sentences=args.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
ids=batch["id"],
src_tokens=batch["net_input"]["src_tokens"],
src_lengths=batch["net_input"]["src_lengths"],
)
def decode_rerank_scores(args):
if args.max_tokens is None and args.batch_size is None:
args.batch_size = 1
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load ensemble
logger.info("loading model(s) from {}".format(args.path))
models, _model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path], arg_overrides=eval(args.model_overrides),
)
for model in models:
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Initialize generator
generator = task.build_generator(args)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(args)
bpe = task.build_bpe(args)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
max_positions = utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
)
src, hyp, mt_scores = parse_fairseq_gen(args.in_text, task)
model_scores = {}
logger.info("decode reranker score")
for batch in make_batches(args, src, hyp, task, max_positions, encode_fn):
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths},
}
scores = task.inference_step(generator, models, sample)
for id, sc in zip(batch.ids.tolist(), scores.tolist()):
model_scores[id] = sc[0]
model_scores = [model_scores[i] for i in range(len(model_scores))]
return src, hyp, mt_scores, model_scores
def get_score(mt_s, md_s, w1, lp, tgt_len):
return mt_s / (tgt_len ** lp) * w1 + md_s
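# Added note: the combined rerank score is
#   w1 * s_mt / len(hyp) ** lp + s_model,
# i.e. the forward MT score is length-normalized with lenpen before weighting.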
def get_best_hyps(mt_scores, md_scores, hypos, fw_weight, lenpen, beam):
assert len(mt_scores) == len(md_scores) and len(mt_scores) == len(hypos)
hypo_scores = []
best_hypos = []
best_scores = []
offset = 0
for i in range(len(hypos)):
tgt_len = len(hypos[i].split())
hypo_scores.append(
get_score(mt_scores[i], md_scores[i], fw_weight, lenpen, tgt_len)
)
if (i + 1) % beam == 0:
max_i = np.argmax(hypo_scores)
best_hypos.append(hypos[offset + max_i])
best_scores.append(hypo_scores[max_i])
hypo_scores = []
offset += beam
return best_hypos, best_scores
def eval_metric(args, hypos, ref):
if args.metric == "bleu":
score = sacrebleu.corpus_bleu(hypos, [ref]).score
else:
score = sacrebleu.corpus_ter(hypos, [ref]).score
return score
def score_target_hypo(args, fw_weight, lp):
mt_scores = pool_init_variables["mt_scores"]
model_scores = pool_init_variables["model_scores"]
hyp = pool_init_variables["hyp"]
ref = pool_init_variables["ref"]
best_hypos, _ = get_best_hyps(
mt_scores, model_scores, hyp, fw_weight, lp, args.beam
)
rerank_eval = None
if ref:
rerank_eval = eval_metric(args, best_hypos, ref)
print(f"fw_weight {fw_weight}, lenpen {lp}, eval {rerank_eval}")
return rerank_eval
def print_result(best_scores, best_hypos, output_file):
for i, (s, h) in enumerate(zip(best_scores, best_hypos)):
print(f"{i}\t{s}\t{h}", file=output_file)
def main(args):
utils.import_user_module(args)
src, hyp, mt_scores, model_scores = decode_rerank_scores(args)
assert (
not args.tune or args.target_text is not None
), "--target-text has to be set when tuning weights"
if args.target_text:
ref = read_target(args.target_text)
assert len(src) == len(
ref
), f"different numbers of source and target sentences ({len(src)} vs. {len(ref)})"
orig_best_hypos = [hyp[i] for i in range(0, len(hyp), args.beam)]
orig_eval = eval_metric(args, orig_best_hypos, ref)
if args.tune:
logger.info("tune weights for reranking")
random_params = np.array(
[
[
random.uniform(
args.lower_bound_fw_weight, args.upper_bound_fw_weight
),
random.uniform(args.lower_bound_lenpen, args.upper_bound_lenpen),
]
for k in range(args.num_trials)
]
)
logger.info("launching pool")
with Pool(
32,
initializer=init_loaded_scores,
initargs=(mt_scores, model_scores, hyp, ref),
) as p:
rerank_scores = p.starmap(
score_target_hypo,
[
(args, random_params[i][0], random_params[i][1],)
for i in range(args.num_trials)
],
)
if args.metric == "bleu":
best_index = np.argmax(rerank_scores)
else:
best_index = np.argmin(rerank_scores)
best_fw_weight = random_params[best_index][0]
best_lenpen = random_params[best_index][1]
else:
assert (
args.lenpen is not None and args.fw_weight is not None
), "--lenpen and --fw-weight should be set"
best_fw_weight, best_lenpen = args.fw_weight, args.lenpen
best_hypos, best_scores = get_best_hyps(
mt_scores, model_scores, hyp, best_fw_weight, best_lenpen, args.beam
)
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok=True)
output_path = os.path.join(
args.results_path, "generate-{}.txt".format(args.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as o:
print_result(best_scores, best_hypos, o)
else:
print_result(best_scores, best_hypos, sys.stdout)
if args.target_text:
rerank_eval = eval_metric(args, best_hypos, ref)
print(f"before reranking, {args.metric.upper()}:", orig_eval)
print(
f"after reranking with fw_weight={best_fw_weight}, lenpen={best_lenpen}, {args.metric.upper()}:",
rerank_eval,
)
def cli_main():
parser = options.get_generation_parser(interactive=True)
parser.add_argument(
"--in-text",
default=None,
required=True,
help="text from fairseq-interactive output, containing source sentences and hypotheses",
)
parser.add_argument("--target-text", default=None, help="reference text")
parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu")
parser.add_argument(
"--tune",
action="store_true",
help="if set, tune weights on fw scores and lenpen instead of applying fixed weights for reranking",
)
parser.add_argument(
"--lower-bound-fw-weight",
default=0.0,
type=float,
help="lower bound of search space",
)
parser.add_argument(
"--upper-bound-fw-weight",
default=3,
type=float,
help="upper bound of search space",
)
parser.add_argument(
"--lower-bound-lenpen",
default=0.0,
type=float,
help="lower bound of search space",
)
parser.add_argument(
"--upper-bound-lenpen",
default=3,
type=float,
help="upper bound of search space",
)
parser.add_argument(
"--fw-weight", type=float, default=None, help="weight on the fw model score"
)
parser.add_argument(
"--num-trials",
default=1000,
type=int,
help="number of trials to do for random search",
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| 11,312 | 29.994521 | 144 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/__init__.py
|
from . import criterions, models, tasks # noqa
| 48 | 23.5 | 47 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/criterions/discriminative_reranking_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
_EPSILON = torch.finfo(torch.float32).eps
TARGET_DIST_NORM_CHOICES = ChoiceEnum(["none", "minmax"])
@dataclass
class KLDivergenceRerankingCriterionConfig(FairseqDataclass):
target_dist_norm: TARGET_DIST_NORM_CHOICES = field(
default="none",
metadata={"help": "method to normalize the range of target scores"},
)
temperature: float = field(
default=1.0,
metadata={"help": "temperature in softmax for target distributions"},
)
forward_batch_size: int = field(
default=32,
metadata={
"help": "number of hypotheses per batch for model forward (set a value smaller than --mt-beam to avoid OOM when training with a large beam size)"
},
)
@register_criterion(
"kl_divergence_rereanking", dataclass=KLDivergenceRerankingCriterionConfig
)
class KLDivergenceRerankingCriterion(FairseqCriterion):
def __init__(
self, task, target_dist_norm, temperature, forward_batch_size,
):
super().__init__(task)
self.target_dist_norm = target_dist_norm
self.temperature = temperature
self.forward_batch_size = forward_batch_size
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
sample_size = sample["id"].numel()
assert sample_size % self.task.cfg.mt_beam == 0, (
f"sample_size ({sample_size}) cannot be divided by beam size ({self.task.cfg.mt_beam})."
f"Please set --required-batch-size-multiple={self.task.cfg.mt_beam}."
)
# split into smaller batches for model forward
batch_out = []
for i in range(0, sample_size, self.forward_batch_size):
j = min(i + self.forward_batch_size, sample_size)
out = model(
src_tokens=sample["net_input"]["src_tokens"][i:j, :],
src_lengths=sample["net_input"]["src_lengths"][i:j],
)
batch_out.append(
model.sentence_forward(out, sample["net_input"]["src_tokens"][i:j, :])
)
batch_out = torch.cat(batch_out, dim=0).view(
self.task.cfg.mt_beam, sample_size // self.task.cfg.mt_beam, -1
) # T x B x C
if model.joint_classification == "sent":
batch_out = model.joint_forward(batch_out)
scores = model.classification_forward(batch_out.view(sample_size, 1, -1)).view(
-1, self.task.cfg.mt_beam
) # input: B x T x C
loss = self.compute_kl_loss(
scores, sample["target"][:, 0].view(-1, self.task.cfg.mt_beam)
)
sample_size = sample_size // self.task.cfg.mt_beam
logging_output = {
"loss": loss.detach(),
"ntokens": sample["ntokens"],
"nsentences": sample_size * self.task.cfg.mt_beam,
"sample_size": sample_size,
"scores": scores.detach(),
}
return loss, sample_size, logging_output
def compute_kl_loss(self, logits, target):
norm_target = target
if self.target_dist_norm == "minmax":
min_v = torch.min(target, 1, keepdim=True).values
max_v = torch.max(target, 1, keepdim=True).values
norm_target = (target - min_v) / (max_v - min_v + _EPSILON)
target_dist = F.softmax(
norm_target / self.temperature, dim=-1, dtype=torch.float32
)
model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = -(target_dist * model_dist - target_dist * target_dist.log()).sum()
return loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
loss = loss_sum / sample_size / math.log(2)
metrics.log_scalar("loss", loss, sample_size, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
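# Hedged sketch (not part of the original module): reproduces the KL loss
# above on toy tensors, without constructing a criterion. When the model
# distribution matches the target distribution the loss is zero.
def _demo_kl_loss():
    target = torch.tensor([[2.0, 1.0, 0.0]])  # raw metric scores per hypothesis
    logits = torch.tensor([[2.0, 1.0, 0.0]])  # model scores per hypothesis
    target_dist = F.softmax(target, dim=-1, dtype=torch.float32)
    model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32)
    loss = -(target_dist * model_dist - target_dist * target_dist.log()).sum()
    assert torch.isclose(loss, torch.tensor(0.0), atol=1e-6)
    return loss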
| 4,997 | 34.956835 | 157 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/criterions/__init__.py
|
from .discriminative_reranking_criterion import KLDivergenceRerankingCriterion
__all__ = [
"KLDivergenceRerankingCriterion",
]
| 133 | 18.142857 | 78 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/models/discriminative_reranking_model.py
|
from dataclasses import dataclass, field
import os
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
BaseFairseqModel,
register_model,
)
from fairseq.models.roberta.model import RobertaClassificationHead
from fairseq.modules import (
LayerNorm,
TransformerSentenceEncoder,
TransformerSentenceEncoderLayer,
)
ACTIVATION_FN_CHOICES = ChoiceEnum(utils.get_available_activation_fns())
JOINT_CLASSIFICATION_CHOICES = ChoiceEnum(["none", "sent"])
SENTENCE_REP_CHOICES = ChoiceEnum(["head", "meanpool", "maxpool"])
def update_init_roberta_model_state(state):
"""
update the state_dict of a Roberta model for initializing
weights of the BertRanker
"""
for k in list(state.keys()):
if ".lm_head." in k or "version" in k:
del state[k]
continue
# remove 'encoder/decoder.sentence_encoder.' from the key
assert k.startswith("encoder.sentence_encoder.") or k.startswith(
"decoder.sentence_encoder."
), f"Cannot recognize parameter name {k}"
if "layernorm_embedding" in k:
new_k = k.replace(".layernorm_embedding.", ".emb_layer_norm.")
state[new_k[25:]] = state[k]
else:
state[k[25:]] = state[k]
del state[k]
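# Hedged sketch (not part of the original module): illustrates the key
# rewriting above on a toy state dict. 'lm_head' entries are dropped and the
# 25-character 'encoder.sentence_encoder.' prefix is stripped.
def _demo_update_state_keys():
    state = {
        "encoder.sentence_encoder.layers.0.fc1.weight": 1,
        "encoder.lm_head.dense.weight": 2,
    }
    update_init_roberta_model_state(state)
    assert state == {"layers.0.fc1.weight": 1}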
class BaseRanker(nn.Module):
def __init__(self, args, task):
super().__init__()
self.separator_token = task.dictionary.eos()
self.padding_idx = task.dictionary.pad()
def forward(self, src_tokens):
raise NotImplementedError
def get_segment_labels(self, src_tokens):
segment_boundary = (src_tokens == self.separator_token).long()
segment_labels = (
segment_boundary.cumsum(dim=1)
- segment_boundary
- (src_tokens == self.padding_idx).long()
)
return segment_labels
def get_positions(self, src_tokens, segment_labels):
segment_positions = (
torch.arange(src_tokens.shape[1])
.to(src_tokens.device)
.repeat(src_tokens.shape[0], 1)
)
segment_boundary = (src_tokens == self.separator_token).long()
_, col_idx = (segment_positions * segment_boundary).nonzero(as_tuple=True)
col_idx = torch.cat([torch.zeros(1).type_as(col_idx), col_idx])
offset = torch.cat(
[
torch.zeros(1).type_as(segment_boundary),
segment_boundary.sum(dim=1).cumsum(dim=0)[:-1],
]
)
segment_positions -= col_idx[segment_labels + offset.unsqueeze(1)] * (
segment_labels != 0
)
padding_mask = src_tokens.ne(self.padding_idx)
segment_positions = (segment_positions + 1) * padding_mask.type_as(
segment_positions
) + self.padding_idx
return segment_positions
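# Hedged sketch (not part of the original module): runs get_segment_labels()
# on a toy batch. The stub task below is an assumption for illustration, with
# eos (the separator) = 2 and pad = 1.
def _demo_segment_labels():
    class _StubDictionary:
        def eos(self):
            return 2

        def pad(self):
            return 1

    class _StubTask:
        dictionary = _StubDictionary()

    ranker = BaseRanker(args=None, task=_StubTask())
    # "src src </s> hyp hyp </s> <pad>"
    toks = torch.tensor([[5, 6, 2, 7, 8, 2, 1]])
    labels = ranker.get_segment_labels(toks)
    assert labels.tolist() == [[0, 0, 0, 1, 1, 1, 1]]
    return labels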
class BertRanker(BaseRanker):
def __init__(self, args, task):
super(BertRanker, self).__init__(args, task)
init_model = getattr(args, "pretrained_model", "")
self.joint_layers = nn.ModuleList()
if os.path.isfile(init_model):
print(f"initialize weight from {init_model}")
from fairseq import hub_utils
x = hub_utils.from_pretrained(
os.path.dirname(init_model),
checkpoint_file=os.path.basename(init_model),
)
in_state_dict = x["models"][0].state_dict()
init_args = x["args"].model
num_positional_emb = init_args.max_positions + task.dictionary.pad() + 1
# follow the setup in roberta
self.model = TransformerSentenceEncoder(
padding_idx=task.dictionary.pad(),
vocab_size=len(task.dictionary),
num_encoder_layers=getattr(
args, "encoder_layers", init_args.encoder_layers
),
embedding_dim=init_args.encoder_embed_dim,
ffn_embedding_dim=init_args.encoder_ffn_embed_dim,
num_attention_heads=init_args.encoder_attention_heads,
dropout=init_args.dropout,
attention_dropout=init_args.attention_dropout,
activation_dropout=init_args.activation_dropout,
num_segments=2, # add language embeddings
max_seq_len=num_positional_emb,
offset_positions_by_padding=False,
encoder_normalize_before=True,
apply_bert_init=True,
activation_fn=init_args.activation_fn,
freeze_embeddings=args.freeze_embeddings,
n_trans_layers_to_freeze=args.n_trans_layers_to_freeze,
)
# still need to learn segment embeddings as we added a second language embedding
if args.freeze_embeddings:
for p in self.model.segment_embeddings.parameters():
p.requires_grad = False
update_init_roberta_model_state(in_state_dict)
print("loading weights from the pretrained model")
self.model.load_state_dict(
in_state_dict, strict=False
) # ignore mismatch in language embeddings
ffn_embedding_dim = init_args.encoder_ffn_embed_dim
num_attention_heads = init_args.encoder_attention_heads
dropout = init_args.dropout
attention_dropout = init_args.attention_dropout
activation_dropout = init_args.activation_dropout
activation_fn = init_args.activation_fn
classifier_embed_dim = getattr(
args, "embed_dim", init_args.encoder_embed_dim
)
if classifier_embed_dim != init_args.encoder_embed_dim:
self.transform_layer = nn.Linear(
init_args.encoder_embed_dim, classifier_embed_dim
)
else:
self.model = TransformerSentenceEncoder(
padding_idx=task.dictionary.pad(),
vocab_size=len(task.dictionary),
num_encoder_layers=args.encoder_layers,
embedding_dim=args.embed_dim,
ffn_embedding_dim=args.ffn_embed_dim,
num_attention_heads=args.attention_heads,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
max_seq_len=task.max_positions()
if task.max_positions()
else args.tokens_per_sample,
num_segments=2,
offset_positions_by_padding=False,
encoder_normalize_before=args.encoder_normalize_before,
apply_bert_init=args.apply_bert_init,
activation_fn=args.activation_fn,
)
classifier_embed_dim = args.embed_dim
ffn_embedding_dim = args.ffn_embed_dim
num_attention_heads = args.attention_heads
dropout = args.dropout
attention_dropout = args.attention_dropout
activation_dropout = args.activation_dropout
activation_fn = args.activation_fn
self.joint_classification = args.joint_classification
if args.joint_classification == "sent":
if args.joint_normalize_before:
self.joint_layer_norm = LayerNorm(classifier_embed_dim)
else:
self.joint_layer_norm = None
self.joint_layers = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=classifier_embed_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
)
for _ in range(args.num_joint_layers)
]
)
self.classifier = RobertaClassificationHead(
classifier_embed_dim,
classifier_embed_dim,
1, # num_classes
"tanh",
args.classifier_dropout,
)
def forward(self, src_tokens, src_lengths):
segment_labels = self.get_segment_labels(src_tokens)
positions = self.get_positions(src_tokens, segment_labels)
inner_states, _ = self.model(
tokens=src_tokens,
segment_labels=segment_labels,
last_state_only=True,
positions=positions,
)
return inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C
def sentence_forward(self, encoder_out, src_tokens=None, sentence_rep="head"):
# encoder_out: B x T x C
if sentence_rep == "head":
x = encoder_out[:, :1, :]
else: # 'meanpool', 'maxpool'
            assert src_tokens is not None, "meanpool and maxpool require src_tokens input"
segment_labels = self.get_segment_labels(src_tokens)
padding_mask = src_tokens.ne(self.padding_idx)
encoder_mask = segment_labels * padding_mask.type_as(segment_labels)
if sentence_rep == "meanpool":
ntokens = torch.sum(encoder_mask, dim=1, keepdim=True)
x = torch.sum(
encoder_out * encoder_mask.unsqueeze(2), dim=1, keepdim=True
) / ntokens.unsqueeze(2).type_as(encoder_out)
else: # 'maxpool'
encoder_out[
(encoder_mask == 0).unsqueeze(2).repeat(1, 1, encoder_out.shape[-1])
] = -float("inf")
x, _ = torch.max(encoder_out, dim=1, keepdim=True)
if hasattr(self, "transform_layer"):
x = self.transform_layer(x)
return x # B x 1 x C
def joint_forward(self, x):
# x: T x B x C
if self.joint_layer_norm:
x = self.joint_layer_norm(x.transpose(0, 1))
x = x.transpose(0, 1)
for layer in self.joint_layers:
x, _ = layer(x, self_attn_padding_mask=None)
return x
def classification_forward(self, x):
# x: B x T x C
return self.classifier(x)
@dataclass
class DiscriminativeNMTRerankerConfig(FairseqDataclass):
pretrained_model: str = field(
default="", metadata={"help": "pretrained model to load"}
)
sentence_rep: SENTENCE_REP_CHOICES = field(
default="head",
metadata={
"help": "method to transform the output of the transformer stack to a sentence-level representation"
},
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN"}
)
classifier_dropout: float = field(
default=0.0, metadata={"help": "classifier dropout probability"}
)
embed_dim: int = field(default=768, metadata={"help": "embedding dimension"})
ffn_embed_dim: int = field(
default=2048, metadata={"help": "embedding dimension for FFN"}
)
encoder_layers: int = field(default=12, metadata={"help": "num encoder layers"})
attention_heads: int = field(default=8, metadata={"help": "num attention heads"})
encoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each encoder block"}
)
apply_bert_init: bool = field(
default=False, metadata={"help": "use custom param initialization for BERT"}
)
activation_fn: ACTIVATION_FN_CHOICES = field(
default="relu", metadata={"help": "activation function to use"}
)
freeze_embeddings: bool = field(
default=False, metadata={"help": "freeze embeddings in the pretrained model"}
)
n_trans_layers_to_freeze: int = field(
default=0,
metadata={
"help": "number of layers to freeze in the pretrained transformer model"
},
)
    # joint classification
joint_classification: JOINT_CLASSIFICATION_CHOICES = field(
default="none",
metadata={"help": "method to compute joint features for classification"},
)
num_joint_layers: int = field(
default=1, metadata={"help": "number of joint layers"}
)
joint_normalize_before: bool = field(
default=False,
metadata={"help": "apply layer norm on the input to the joint layer"},
)
@register_model(
"discriminative_nmt_reranker", dataclass=DiscriminativeNMTRerankerConfig
)
class DiscriminativeNMTReranker(BaseFairseqModel):
@classmethod
def build_model(cls, args, task):
model = BertRanker(args, task)
return DiscriminativeNMTReranker(args, model)
def __init__(self, args, model):
super().__init__()
self.model = model
self.sentence_rep = args.sentence_rep
self.joint_classification = args.joint_classification
def forward(self, src_tokens, src_lengths, **kwargs):
return self.model(src_tokens, src_lengths)
def sentence_forward(self, encoder_out, src_tokens):
return self.model.sentence_forward(encoder_out, src_tokens, self.sentence_rep)
def joint_forward(self, x):
return self.model.joint_forward(x)
def classification_forward(self, x):
return self.model.classification_forward(x)
| 13,714 | 36.472678 | 112 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/models/__init__.py
|
from .discriminative_reranking_model import DiscriminativeNMTReranker
__all__ = [
"DiscriminativeNMTReranker",
]
| 119 | 16.142857 | 69 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/scripts/prep_data.py
|
#!/usr/bin/env python
import argparse
from multiprocessing import Pool
from pathlib import Path
import sacrebleu
import sentencepiece as spm
def read_text_file(filename):
with open(filename, "r") as f:
output = [line.strip() for line in f]
return output
def get_bleu(in_sent, target_sent):
bleu = sacrebleu.corpus_bleu([in_sent], [[target_sent]])
out = " ".join(
map(str, [bleu.score, bleu.sys_len, bleu.ref_len] + bleu.counts + bleu.totals)
)
return out
def get_ter(in_sent, target_sent):
ter = sacrebleu.corpus_ter([in_sent], [[target_sent]])
out = " ".join(map(str, [ter.score, ter.num_edits, ter.ref_length]))
return out
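# Hedged sketch (not part of the original script): documents the field layout
# of the metric lines produced by get_bleu() and get_ter().
def _demo_metric_line_format():
    # score, sys_len, ref_len, 4 n-gram counts, 4 n-gram totals -> 11 fields
    assert len(get_bleu("the cat sat", "the cat sat").split()) == 11
    # score, num_edits, ref_length -> 3 fields
    assert len(get_ter("the cat sat", "the cat sat").split()) == 3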
def init(sp_model):
global sp
sp = spm.SentencePieceProcessor()
sp.Load(sp_model)
def process(source_sent, target_sent, hypo_sent, metric):
source_bpe = " ".join(sp.EncodeAsPieces(source_sent))
hypo_bpe = [" ".join(sp.EncodeAsPieces(h)) for h in hypo_sent]
if metric == "bleu":
score_str = [get_bleu(h, target_sent) for h in hypo_sent]
else: # ter
score_str = [get_ter(h, target_sent) for h in hypo_sent]
return source_bpe, hypo_bpe, score_str
def main(args):
assert (
args.split.startswith("train") or args.num_shards == 1
), "--num-shards should be set to 1 for valid and test sets"
assert (
args.split.startswith("train")
or args.split.startswith("valid")
or args.split.startswith("test")
), "--split should be set to train[n]/valid[n]/test[n]"
source_sents = read_text_file(args.input_source)
target_sents = read_text_file(args.input_target)
num_sents = len(source_sents)
assert num_sents == len(
target_sents
), f"{args.input_source} and {args.input_target} should have the same number of sentences."
hypo_sents = read_text_file(args.input_hypo)
assert (
len(hypo_sents) % args.beam == 0
), f"Number of hypotheses ({len(hypo_sents)}) cannot be divided by beam size ({args.beam})."
hypo_sents = [
hypo_sents[i : i + args.beam] for i in range(0, len(hypo_sents), args.beam)
]
assert num_sents == len(
hypo_sents
), f"{args.input_hypo} should contain {num_sents * args.beam} hypotheses but only has {len(hypo_sents) * args.beam}. (--beam={args.beam})"
output_dir = args.output_dir / args.metric
for ns in range(args.num_shards):
print(f"processing shard {ns+1}/{args.num_shards}")
shard_output_dir = output_dir / f"split{ns+1}"
source_output_dir = shard_output_dir / "input_src"
hypo_output_dir = shard_output_dir / "input_tgt"
metric_output_dir = shard_output_dir / args.metric
source_output_dir.mkdir(parents=True, exist_ok=True)
hypo_output_dir.mkdir(parents=True, exist_ok=True)
metric_output_dir.mkdir(parents=True, exist_ok=True)
if args.n_proc > 1:
with Pool(
args.n_proc, initializer=init, initargs=(args.sentencepiece_model,)
) as p:
output = p.starmap(
process,
[
(source_sents[i], target_sents[i], hypo_sents[i], args.metric)
for i in range(ns, num_sents, args.num_shards)
],
)
else:
init(args.sentencepiece_model)
output = [
process(source_sents[i], target_sents[i], hypo_sents[i], args.metric)
for i in range(ns, num_sents, args.num_shards)
]
with open(source_output_dir / f"{args.split}.bpe", "w") as s_o, open(
hypo_output_dir / f"{args.split}.bpe", "w"
) as h_o, open(metric_output_dir / f"{args.split}.{args.metric}", "w") as m_o:
for source_bpe, hypo_bpe, score_str in output:
assert len(hypo_bpe) == len(score_str)
for h, m in zip(hypo_bpe, score_str):
s_o.write(f"{source_bpe}\n")
h_o.write(f"{h}\n")
m_o.write(f"{m}\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-source", type=Path, required=True)
parser.add_argument("--input-target", type=Path, required=True)
parser.add_argument("--input-hypo", type=Path, required=True)
parser.add_argument("--output-dir", type=Path, required=True)
parser.add_argument("--split", type=str, required=True)
parser.add_argument("--beam", type=int, required=True)
parser.add_argument("--sentencepiece-model", type=str, required=True)
parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu")
parser.add_argument("--num-shards", type=int, default=1)
parser.add_argument("--n-proc", type=int, default=8)
args = parser.parse_args()
main(args)
| 4,872 | 34.569343 | 142 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/tasks/discriminative_reranking_task.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import logging
import os
import numpy as np
import torch
from fairseq import metrics
from fairseq.data import (
ConcatDataset,
ConcatSentencesDataset,
data_utils,
Dictionary,
IdDataset,
indexed_dataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
TruncateDataset,
TokenBlockDataset,
)
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II, MISSING
EVAL_BLEU_ORDER = 4
TARGET_METRIC_CHOICES = ChoiceEnum(["bleu", "ter"])
logger = logging.getLogger(__name__)
@dataclass
class DiscriminativeRerankingNMTConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
num_data_splits: int = field(
default=1, metadata={"help": "total number of data splits"}
)
no_shuffle: bool = field(
default=False, metadata={"help": "do not shuffle training data"}
)
max_positions: int = field(
default=512, metadata={"help": "number of positional embeddings to learn"}
)
include_src: bool = field(
default=False, metadata={"help": "include source sentence"}
)
mt_beam: int = field(default=50, metadata={"help": "beam size of input hypotheses"})
eval_target_metric: bool = field(
default=False,
metadata={"help": "evaluation with the target metric during validation"},
)
target_metric: TARGET_METRIC_CHOICES = field(
default="bleu", metadata={"help": "name of the target metric to optimize for"}
)
train_subset: str = field(
default=II("dataset.train_subset"),
metadata={"help": "data subset to use for training (e.g. train, valid, test)"},
)
seed: int = field(
default=II("common.seed"),
metadata={"help": "pseudo random number generator seed"},
)
class RerankerScorer(object):
"""Scores the target for a given (source (optional), target) input."""
def __init__(self, args, mt_beam):
self.mt_beam = mt_beam
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample["net_input"]
assert len(models) == 1, "does not support model ensemble"
model = models[0]
bs = net_input["src_tokens"].shape[0]
assert (
model.joint_classification == "none" or bs % self.mt_beam == 0
), f"invalid batch size ({bs}) for joint classification with beam size ({self.mt_beam})"
model.eval()
logits = model(**net_input)
batch_out = model.sentence_forward(logits, net_input["src_tokens"])
if model.joint_classification == "sent":
batch_out = model.joint_forward(
batch_out.view(self.mt_beam, bs // self.mt_beam, -1)
)
scores = model.classification_forward(
batch_out.view(bs, 1, -1)
) # input: B x T x C
return scores
@register_task(
"discriminative_reranking_nmt", dataclass=DiscriminativeRerankingNMTConfig
)
class DiscriminativeRerankingNMTTask(FairseqTask):
"""
Translation rerank task.
The input can be either (src, tgt) sentence pairs or tgt sentence only.
"""
cfg: DiscriminativeRerankingNMTConfig
def __init__(self, cfg: DiscriminativeRerankingNMTConfig, data_dictionary=None):
super().__init__(cfg)
self.dictionary = data_dictionary
self._max_positions = cfg.max_positions
# args.tokens_per_sample = self._max_positions
# self.num_classes = 1 # for model
@classmethod
def load_dictionary(cls, cfg, filename):
"""Load the dictionary from the filename"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>") # for loading pretrained XLMR model
return dictionary
@classmethod
def setup_task(cls, cfg: DiscriminativeRerankingNMTConfig, **kwargs):
# load data dictionary (assume joint dictionary)
data_path = cfg.data
data_dict = cls.load_dictionary(
cfg, os.path.join(data_path, "input_src/dict.txt")
)
logger.info("[input] src dictionary: {} types".format(len(data_dict)))
return DiscriminativeRerankingNMTTask(cfg, data_dict)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
if self.cfg.data.endswith("1"):
data_shard = (epoch - 1) % self.cfg.num_data_splits + 1
data_path = self.cfg.data[:-1] + str(data_shard)
else:
data_path = self.cfg.data
def get_path(type, data_split):
return os.path.join(data_path, str(type), data_split)
def make_dataset(type, dictionary, data_split, combine):
split_path = get_path(type, data_split)
dataset = data_utils.load_indexed_dataset(
split_path, dictionary, combine=combine,
)
return dataset
def load_split(data_split, metric):
input_src = None
if self.cfg.include_src:
input_src = make_dataset(
"input_src", self.dictionary, data_split, combine=False
)
assert input_src is not None, "could not find dataset: {}".format(
get_path("input_src", data_split)
)
input_tgt = make_dataset(
"input_tgt", self.dictionary, data_split, combine=False
)
assert input_tgt is not None, "could not find dataset: {}".format(
get_path("input_tgt", data_split)
)
label_path = f"{get_path(metric, data_split)}.{metric}"
assert os.path.exists(label_path), f"could not find dataset: {label_path}"
np_labels = np.loadtxt(label_path)
if self.cfg.target_metric == "ter":
np_labels = -np_labels
label = RawLabelDataset(np_labels)
return input_src, input_tgt, label
src_datasets = []
tgt_datasets = []
label_datasets = []
if split == self.cfg.train_subset:
for k in itertools.count():
split_k = "train" + (str(k) if k > 0 else "")
prefix = os.path.join(data_path, "input_tgt", split_k)
if not indexed_dataset.dataset_exists(prefix, impl=None):
if k > 0:
break
else:
raise FileNotFoundError(f"Dataset not found: {prefix}")
input_src, input_tgt, label = load_split(
split_k, self.cfg.target_metric
)
src_datasets.append(input_src)
tgt_datasets.append(input_tgt)
label_datasets.append(label)
else:
input_src, input_tgt, label = load_split(split, self.cfg.target_metric)
src_datasets.append(input_src)
tgt_datasets.append(input_tgt)
label_datasets.append(label)
if len(tgt_datasets) == 1:
input_tgt, label = tgt_datasets[0], label_datasets[0]
if self.cfg.include_src:
input_src = src_datasets[0]
else:
input_tgt = ConcatDataset(tgt_datasets)
label = ConcatDataset(label_datasets)
if self.cfg.include_src:
input_src = ConcatDataset(src_datasets)
input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)
if self.cfg.include_src:
input_src = PrependTokenDataset(input_src, self.dictionary.bos())
input_src = TruncateDataset(input_src, self.cfg.max_positions)
src_lengths = NumelDataset(input_src, reduce=False)
src_tokens = ConcatSentencesDataset(input_src, input_tgt)
else:
src_tokens = PrependTokenDataset(input_tgt, self.dictionary.bos())
src_lengths = NumelDataset(src_tokens, reduce=False)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens, pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths,
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
"target": label,
}
dataset = NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes],)
assert len(dataset) % self.cfg.mt_beam == 0, (
"dataset size (%d) is not a multiple of beam size (%d)"
% (len(dataset), self.cfg.mt_beam)
)
# no need to shuffle valid/test sets
if not self.cfg.no_shuffle and split == self.cfg.train_subset:
            # need to keep all hypotheses of the same sentence together
start_idx = np.arange(0, len(dataset), self.cfg.mt_beam)
with data_utils.numpy_seed(self.cfg.seed + epoch):
np.random.shuffle(start_idx)
idx = np.arange(0, self.cfg.mt_beam)
shuffle = np.tile(idx, (len(start_idx), 1)).reshape(-1) + np.tile(
start_idx, (self.cfg.mt_beam, 1)
).transpose().reshape(-1)
dataset = SortDataset(dataset, sort_order=[shuffle],)
logger.info(f"Loaded {split} with #samples: {len(dataset)}")
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
assert not self.cfg.include_src or len(src_tokens[0]) == 2
input_src = None
if self.cfg.include_src:
input_src = TokenBlockDataset(
[t[0] for t in src_tokens],
[l[0] for l in src_lengths],
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
)
input_src = PrependTokenDataset(input_src, self.dictionary.bos())
input_src = TruncateDataset(input_src, self.cfg.max_positions)
input_tgt = TokenBlockDataset(
[t[-1] for t in src_tokens],
[l[-1] for l in src_lengths],
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
)
input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)
if self.cfg.include_src:
src_tokens = ConcatSentencesDataset(input_src, input_tgt)
src_lengths = NumelDataset(input_src, reduce=False)
else:
input_tgt = PrependTokenDataset(input_tgt, self.dictionary.bos())
src_tokens = input_tgt
src_lengths = NumelDataset(src_tokens, reduce=False)
dataset = {
"id": IdDataset(),
"net_input": {
"src_tokens": RightPadDataset(
src_tokens, pad_idx=self.source_dictionary.pad(),
),
"src_lengths": src_lengths,
},
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_tokens, reduce=True),
}
return NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes],)
def build_model(self, cfg: FairseqDataclass):
return super().build_model(cfg)
def build_generator(self, args):
return RerankerScorer(args, mt_beam=self.cfg.mt_beam)
def max_positions(self):
return self._max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
def create_dummy_batch(self, device):
        dummy_target = (
            torch.zeros(self.cfg.mt_beam, EVAL_BLEU_ORDER * 2 + 3).long().to(device)
            if self.cfg.target_metric != "ter"
            else torch.zeros(self.cfg.mt_beam, 3).long().to(device)
        )
return {
"id": torch.zeros(self.cfg.mt_beam, 1).long().to(device),
"net_input": {
"src_tokens": torch.zeros(self.cfg.mt_beam, 4).long().to(device),
"src_lengths": torch.ones(self.cfg.mt_beam, 1).long().to(device),
},
"nsentences": 0,
"ntokens": 0,
"target": dummy_target,
}
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
if ignore_grad and sample is None:
sample = self.create_dummy_batch(model.device)
return super().train_step(
sample, model, criterion, optimizer, update_num, ignore_grad
)
def valid_step(self, sample, model, criterion):
if sample is None:
sample = self.create_dummy_batch(model.device)
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if not self.cfg.eval_target_metric:
return loss, sample_size, logging_output
scores = logging_output["scores"]
if self.cfg.target_metric == "bleu":
assert sample["target"].shape[1] == EVAL_BLEU_ORDER * 2 + 3, (
"target does not contain enough information ("
+ str(sample["target"].shape[1])
+ "for evaluating BLEU"
)
max_id = torch.argmax(scores, dim=1)
select_id = max_id + torch.arange(
0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam
).to(max_id.device)
bleu_data = sample["target"][select_id, 1:].sum(0).data
logging_output["_bleu_sys_len"] = bleu_data[0]
logging_output["_bleu_ref_len"] = bleu_data[1]
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu_data[2 + i]
logging_output["_bleu_totals_" + str(i)] = bleu_data[
2 + EVAL_BLEU_ORDER + i
]
elif self.cfg.target_metric == "ter":
assert sample["target"].shape[1] == 3, (
"target does not contain enough information ("
+ str(sample["target"].shape[1])
+ "for evaluating TER"
)
max_id = torch.argmax(scores, dim=1)
select_id = max_id + torch.arange(
0, sample_size * self.cfg.mt_beam, self.cfg.mt_beam
).to(max_id.device)
ter_data = sample["target"][select_id, 1:].sum(0).data
logging_output["_ter_num_edits"] = -ter_data[0]
logging_output["_ter_ref_len"] = -ter_data[1]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if not self.cfg.eval_target_metric:
return
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
if self.cfg.target_metric == "bleu":
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth,
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
elif self.cfg.target_metric == "ter":
num_edits = sum_logs("_ter_num_edits")
ref_len = sum_logs("_ter_ref_len")
if ref_len > 0:
metrics.log_scalar("_ter_num_edits", num_edits)
metrics.log_scalar("_ter_ref_len", ref_len)
def compute_ter(meters):
score = meters["_ter_num_edits"].sum / meters["_ter_ref_len"].sum
return round(score.item(), 2)
metrics.log_derived("ter", compute_ter)
| 17,491 | 35.747899 | 96 |
py
|
sign-topic
|
sign-topic-main/examples/discriminative_reranking_nmt/tasks/__init__.py
|
from .discriminative_reranking_task import DiscriminativeRerankingNMTTask
__all__ = [
"DiscriminativeRerankingNMTTask",
]
| 128 | 17.428571 | 73 |
py
|
sign-topic
|
sign-topic-main/examples/pointer_generator/preprocess.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from itertools import zip_longest
def replace_oovs(source_in, target_in, vocabulary, source_out, target_out):
"""Replaces out-of-vocabulary words in source and target text with <unk-N>,
    where N is the position of the word in the source sequence.
"""
def format_unk(pos):
return "<unk-{}>".format(pos)
if target_in is None:
target_in = []
for seq_num, (source_seq, target_seq) in enumerate(
zip_longest(source_in, target_in)
):
source_seq_out = []
target_seq_out = []
word_to_pos = dict()
for position, token in enumerate(source_seq.strip().split()):
if token in vocabulary:
token_out = token
else:
if token in word_to_pos:
oov_pos = word_to_pos[token]
else:
word_to_pos[token] = position
oov_pos = position
token_out = format_unk(oov_pos)
source_seq_out.append(token_out)
source_out.write(" ".join(source_seq_out) + "\n")
if target_seq is not None:
for token in target_seq.strip().split():
if token in word_to_pos:
token_out = format_unk(word_to_pos[token])
else:
token_out = token
target_seq_out.append(token_out)
if target_out is not None:
target_out.write(" ".join(target_seq_out) + "\n")
def main():
parser = argparse.ArgumentParser(
description="Replaces out-of-vocabulary words in both source and target "
"sequences with tokens that indicate the position of the word "
"in the source sequence."
)
parser.add_argument(
"--source", type=str, help="text file with source sequences", required=True
)
parser.add_argument(
"--target", type=str, help="text file with target sequences", default=None
)
parser.add_argument("--vocab", type=str, help="vocabulary file", required=True)
parser.add_argument(
"--source-out",
type=str,
help="where to write source sequences with <unk-N> entries",
required=True,
)
parser.add_argument(
"--target-out",
type=str,
help="where to write target sequences with <unk-N> entries",
default=None,
)
args = parser.parse_args()
with open(args.vocab, encoding="utf-8") as vocab:
vocabulary = vocab.read().splitlines()
target_in = (
open(args.target, "r", encoding="utf-8") if args.target is not None else None
)
target_out = (
open(args.target_out, "w", encoding="utf-8")
if args.target_out is not None
else None
)
with open(args.source, "r", encoding="utf-8") as source_in, open(
args.source_out, "w", encoding="utf-8"
) as source_out:
replace_oovs(source_in, target_in, vocabulary, source_out, target_out)
if target_in is not None:
target_in.close()
if target_out is not None:
target_out.close()
if __name__ == "__main__":
main()
| 3,337 | 31.407767 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/pointer_generator/postprocess.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import re
import sys
class OOVIndexError(IndexError):
def __init__(self, pos, source_seq, target_seq):
super(OOVIndexError, self).__init__(
"A <unk-N> tag in the target sequence refers to a position that is "
"outside the source sequence. Most likely there was a mismatch in "
"provided source and target sequences. Otherwise this would mean that "
"the pointing mechanism somehow attended to a position that is past "
"the actual sequence end."
)
self.source_pos = pos
self.source_seq = source_seq
self.target_seq = target_seq
def replace_oovs(source_in, target_in, target_out):
"""Replaces <unk-N> tokens in the target text with the corresponding word in
the source text.
"""
oov_re = re.compile("^<unk-([0-9]+)>$")
for source_seq, target_seq in zip(source_in, target_in):
target_seq_out = []
pos_to_word = source_seq.strip().split()
for token in target_seq.strip().split():
m = oov_re.match(token)
if m:
pos = int(m.group(1))
if pos >= len(pos_to_word):
raise OOVIndexError(pos, source_seq, target_seq)
token_out = pos_to_word[pos]
else:
token_out = token
target_seq_out.append(token_out)
target_out.write(" ".join(target_seq_out) + "\n")
def main():
parser = argparse.ArgumentParser(
description="Replaces <unk-N> tokens in target sequences with words from "
"the corresponding position in the source sequence."
)
parser.add_argument(
"--source", type=str, help="text file with source sequences", required=True
)
parser.add_argument(
"--target", type=str, help="text file with target sequences", required=True
)
parser.add_argument(
"--target-out",
type=str,
help="where to write target sequences without <unk-N> " "entries",
required=True,
)
args = parser.parse_args()
with open(args.source, "r", encoding="utf-8") as source_in, open(
args.target, "r", encoding="utf-8"
) as target_in, open(args.target_out, "w", encoding="utf-8") as target_out:
replace_oovs(source_in, target_in, target_out)
if __name__ == "__main__":
try:
main()
except OOVIndexError as e:
print(e, file=sys.stderr)
print("Source sequence:", e.source_seq.strip(), file=sys.stderr)
print("Target sequence:", e.target_seq.strip(), file=sys.stderr)
print(
"Source sequence length:",
len(e.source_seq.strip().split()),
file=sys.stderr,
)
print("The offending tag points to:", e.source_pos)
sys.exit(2)
| 3,266 | 32.680412 | 85 |
py
|
sign-topic
|
sign-topic-main/examples/pointer_generator/pointer_generator_src/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import transformer_pg # noqa
| 215 | 29.857143 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/pointer_generator/pointer_generator_src/transformer_pg.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, Optional, List, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
)
from torch import Tensor
logger = logging.getLogger(__name__)
@register_model("transformer_pointer_generator")
class TransformerPointerGeneratorModel(TransformerModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani et al, 2017)
<https://arxiv.org/abs/1706.03762>`_, augmented with a pointer-generator
network from `"Get To The Point: Summarization with Pointer-Generator
Networks" (See et al, 2017) <https://arxiv.org/abs/1704.04368>`_.
Args:
encoder (TransformerPointerGeneratorEncoder): the encoder
decoder (TransformerPointerGeneratorDecoder): the decoder
The Transformer pointer-generator model provides the following named
architectures and command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_pointer_generator_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
TransformerModel.add_args(parser)
parser.add_argument('--alignment-heads', type=int, metavar='N',
help='number of attention heads to be used for '
'pointing')
parser.add_argument('--alignment-layer', type=int, metavar='I',
help='layer number to be used for pointing (0 '
'corresponding to the bottommost layer)')
parser.add_argument('--source-position-markers', type=int, metavar='N',
help='dictionary includes N additional items that '
'represent an OOV token at a particular input '
'position')
parser.add_argument('--force-generation', type=float, metavar='P',
default=None,
help='set the vocabulary distribution weight to P, '
'instead of predicting it from the input (1.0 '
'corresponding to generation, 0.0 to pointing)')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
if getattr(args, "source_position_markers", None) is None:
args.source_position_markers = args.max_source_positions
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if src_dict != tgt_dict:
raise ValueError("Pointer-generator requires a joined dictionary")
def build_embedding(dictionary, embed_dim, path=None):
# The dictionary may include additional items that can be used in
# place of the normal OOV token and that all map to the same
# embedding. Using a different token for each input position allows
# one to restore the word identities from the original source text.
num_embeddings = len(dictionary) - args.source_position_markers
padding_idx = dictionary.pad()
unk_idx = dictionary.unk()
logger.info(
"dictionary indices from {0} to {1} will be mapped to {2}".format(
num_embeddings, len(dictionary) - 1, unk_idx
)
)
emb = Embedding(num_embeddings, embed_dim, padding_idx, unk_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerPointerGeneratorEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerPointerGeneratorDecoder(args, tgt_dict, embed_tokens)
class TransformerPointerGeneratorEncoder(TransformerEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`. The pointer-generator variant adds
the source tokens to the encoder output as these are otherwise not passed
to the decoder.
"""
def forward(
self,
src_tokens,
src_lengths: Optional[Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[Tensor] = None
):
"""
Runs the `forward()` method of the parent Transformer class. Then adds
the source tokens into the encoder output tuple.
        While it might be more elegant if the model passed the source tokens
        to the decoder's `forward()` method as well, this would require
        changes to `SequenceGenerator`.
Args:
src_tokens (torch.LongTensor): tokens in the source language of
shape `(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
namedtuple:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
- **src_tokens** (Tensor): input token ids of shape
`(batch, src_len)`
"""
encoder_out = self.forward_scriptable(src_tokens,
src_lengths,
return_all_hiddens,
token_embeddings)
        # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": encoder_out["encoder_out"], # T x B x C
"encoder_padding_mask": encoder_out["encoder_padding_mask"], # B x T
"encoder_embedding": encoder_out["encoder_embedding"], # B x T x C
"encoder_states": encoder_out["encoder_states"], # List[T x B x C]
"src_tokens": [src_tokens], # B x T
"src_lengths": [],
}
class TransformerPointerGeneratorDecoder(TransformerDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`. The pointer-generator variant mixes
the output probabilities with an attention distribution in the output layer.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False)
# In the pointer-generator model these arguments define the decoder
# layer and the number of attention heads that will be averaged to
# create the alignment for pointing.
self.alignment_heads = args.alignment_heads
self.alignment_layer = args.alignment_layer
input_embed_dim = embed_tokens.embedding_dim
# Generation probabilities / interpolation coefficients are predicted
# from the current decoder input embedding and the decoder output, which
# is the size of output_embed_dim.
p_gen_input_size = input_embed_dim + self.output_embed_dim
self.project_p_gens = nn.Linear(p_gen_input_size, 1)
nn.init.zeros_(self.project_p_gens.bias)
# The dictionary may include a separate entry for an OOV token in each
# input position, so that their identity can be restored from the
# original source text.
self.num_types = len(dictionary)
self.num_oov_types = args.source_position_markers
self.num_embeddings = self.num_types - self.num_oov_types
self.force_p_gen = args.force_generation
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = 0,
alignment_heads: Optional[int] = 1,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict, optional): dictionary used for storing
state during :ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False)
alignment_layer (int, optional): 0-based index of the layer to be
used for pointing (default: 0)
alignment_heads (int, optional): number of attention heads to be
used for pointing (default: 1)
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
# The normal Transformer model doesn't pass the alignment_layer and
# alignment_heads parameters correctly. We use our local variables.
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
alignment_layer=self.alignment_layer,
alignment_heads=self.alignment_heads,
)
if not features_only:
# Embedding the tokens again for generation probability prediction,
# so that we don't have to reimplement the whole extract_features()
# method.
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
prev_output_embed = self.embed_tokens(prev_output_tokens)
prev_output_embed *= self.embed_scale
predictors = torch.cat((prev_output_embed, x), 2)
p_gens = self.project_p_gens(predictors)
p_gens = torch.sigmoid(p_gens.float())
# Torchscript complains if encoder_out or attn are None because
# `output_layer()` signature expects tensors instead
attn: Optional[Tensor] = extra["attn"][0]
assert encoder_out is not None
assert attn is not None
x = self.output_layer(x, attn, encoder_out["src_tokens"][0], p_gens)
return x, extra
def output_layer(
self,
features: Tensor,
attn: Tensor,
src_tokens: Tensor,
p_gens: Tensor
) -> Tensor:
"""
Project features to the vocabulary size and mix with the attention
distributions.
"""
if self.force_p_gen is not None:
p_gens = self.force_p_gen
# project back to size of vocabulary
if self.adaptive_softmax is None:
logits = self.output_projection(features)
else:
logits = features
batch_size = logits.shape[0]
output_length = logits.shape[1]
assert logits.shape[2] == self.num_embeddings
assert src_tokens.shape[0] == batch_size
src_length = src_tokens.shape[1]
# The final output distribution will be a mixture of the normal output
# distribution (softmax of logits) and attention weights.
gen_dists = self.get_normalized_probs_scriptable(
(logits, None), log_probs=False, sample=None
)
gen_dists = torch.mul(gen_dists, p_gens)
padding_size = (batch_size, output_length, self.num_oov_types)
padding = gen_dists.new_zeros(padding_size)
gen_dists = torch.cat((gen_dists, padding), 2)
assert gen_dists.shape[2] == self.num_types
# Scatter attention distributions to distributions over the extended
# vocabulary in a tensor of shape [batch_size, output_length,
# vocab_size]. Each attention weight will be written into a location
# that is for other dimensions the same as in the index tensor, but for
# the third dimension it's the value of the index tensor (the token ID).
attn = torch.mul(attn.float(), 1 - p_gens)
index = src_tokens[:, None, :]
index = index.expand(batch_size, output_length, src_length)
attn_dists_size = (batch_size, output_length, self.num_types)
attn_dists = attn.new_zeros(attn_dists_size)
attn_dists.scatter_add_(2, index, attn.float())
# Final distributions, [batch_size, output_length, num_types].
return gen_dists + attn_dists
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""
Get normalized probabilities (or log probs) from a net's output.
Pointer-generator network output is already normalized.
"""
probs = net_output[0]
# Make sure the probabilities are greater than zero when returning log
# probabilities.
return probs.clamp(1e-10, 1.0).log() if log_probs else probs
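# Hedged sketch (not part of the original module): isolates the scatter_add
# step of output_layer() above on toy tensors. Attention weights of repeated
# source tokens accumulate on the same vocabulary entry.
def _demo_attn_scatter():
    attn = torch.tensor([[[0.5, 0.3, 0.2]]])  # batch x tgt_len x src_len
    src_tokens = torch.tensor([[7, 4, 7]])    # token 7 occurs twice
    num_types = 10
    index = src_tokens[:, None, :].expand(1, 1, 3)
    attn_dists = attn.new_zeros((1, 1, num_types))
    attn_dists.scatter_add_(2, index, attn)
    assert torch.isclose(attn_dists[0, 0, 7], torch.tensor(0.7))
    assert torch.isclose(attn_dists[0, 0, 4], torch.tensor(0.3))
    return attn_dists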
class Embedding(nn.Embedding):
r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
This module is often used to store word embeddings and retrieve them using indices.
The input to the module is a list of indices, and the output is the corresponding
word embeddings. This subclass differs from the standard PyTorch Embedding class by
allowing additional vocabulary entries that will be mapped to the unknown token
embedding.
Args:
num_embeddings (int): size of the dictionary of embeddings
embedding_dim (int): the size of each embedding vector
padding_idx (int): Pads the output with the embedding vector at :attr:`padding_idx`
(initialized to zeros) whenever it encounters the index.
unk_idx (int): Maps all token indices that are greater than or equal to
num_embeddings to this index.
Attributes:
weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
initialized from :math:`\mathcal{N}(0, 1)`
Shape:
- Input: :math:`(*)`, LongTensor of arbitrary shape containing the indices to extract
- Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
.. note::
Keep in mind that only a limited number of optimizers support
sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
:class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
.. note::
With :attr:`padding_idx` set, the embedding vector at
:attr:`padding_idx` is initialized to all zeros. However, note that this
vector can be modified afterwards, e.g., using a customized
initialization method, and thus changing the vector used to pad the
output. The gradient for this vector from :class:`~torch.nn.Embedding`
is always zero.
"""
__constants__ = ["unk_idx"]
# Torchscript: Inheriting from Embedding class produces an error when exporting to Torchscript
    # -> RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details)
# It's happening because max_norm attribute from nn.Embedding is None by default and it cannot be
# cast to a C++ type
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int],
unk_idx: int,
max_norm: Optional[float] = float("inf"),
):
super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx, max_norm=max_norm)
self.unk_idx = unk_idx
nn.init.normal_(self.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(self.weight[padding_idx], 0)
def forward(self, input):
input = torch.where(
input >= self.num_embeddings, torch.ones_like(input) * self.unk_idx, input
)
return nn.functional.embedding(
input, self.weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse
)
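# Hedged usage sketch (dimensions and indices assumed): token ids at or above
# num_embeddings are routed to the unk embedding instead of raising an
# out-of-range error, which is what makes the extended vocabulary safe to feed
# back in at inference time.
#
#     emb = Embedding(num_embeddings=100, embedding_dim=16, padding_idx=1, unk_idx=3)
#     ids = torch.tensor([[5, 99, 150]])  # 150 is an extended-vocab (OOV) index
#     out = emb(ids)                      # out[0, 2] equals emb.weight[3]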
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator"
)
def transformer_pointer_generator(args):
args.alignment_heads = getattr(args, "alignment_heads", 1)
args.alignment_layer = getattr(args, "alignment_layer", -1)
base_architecture(args)
if args.alignment_layer < 0:
args.alignment_layer = args.decoder_layers + args.alignment_layer
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator_iwslt_de_en"
)
def transformer_pointer_generator_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
transformer_pointer_generator(args)
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de"
)
def transformer_pointer_generator_wmt_en_de(args):
transformer_pointer_generator(args)
# Transformer pointer-generator with the base Transformer parameters as used in
# the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
"transformer_pointer_generator",
"transformer_pointer_generator_vaswani_wmt_en_de_big",
)
def transformer_pointer_generator_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
transformer_pointer_generator(args)
@register_model_architecture(
"transformer_pointer_generator",
"transformer_pointer_generator_vaswani_wmt_en_fr_big",
)
def transformer_pointer_generator_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_pointer_generator_vaswani_wmt_en_de_big(args)
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de_big"
)
def transformer_pointer_generator_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_pointer_generator_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture(
"transformer_pointer_generator", "transformer_pointer_generator_wmt_en_de_big_t2t"
)
def transformer_pointer_generator_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_pointer_generator_vaswani_wmt_en_de_big(args)
| 23,439 | 44.163776 | 102 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import tasks, criterions, models # noqa
| 226 | 31.428571 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/criterions/text_guide_cross_entropy_acc.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
from fairseq import metrics, utils
@register_criterion("guided_label_smoothed_cross_entropy_with_accuracy")
class GuidedCrossEntAccCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
guide_alpha,
text_input_cost_ratio,
label_smoothing,
disable_text_guide_update_num=0,
attentive_cost_regularization=0,
):
"""
        guide_alpha: alpha to interpolate nll and kd loss
text_input_cost_ratio: loss ratio for text only input data
label_smoothing: label smoothing ratio
disable_text_guide_update_num: only use nll loss for the first N updates
        attentive_cost_regularization: ratio of attentive cost
"""
super().__init__(task)
self.alpha = guide_alpha
self.attn_beta = attentive_cost_regularization
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.text_input_cost_ratio = text_input_cost_ratio
self.disable_update_num = disable_text_guide_update_num
assert self.alpha >= 0 and self.alpha <= 1.0
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
# fmt: off
parser.add_argument('--guide-alpha', default=0., type=float, metavar='D',
help='alpha to merge kd cost from text to speech input with ce loss')
# fmt: off
parser.add_argument('--disable-text-guide-update-num', default=0, type=int, metavar='D',
help='disable guided target from text for the first N updates.')
parser.add_argument("--attentive-cost-regularization", default=0.0, type=float, metavar='D',
help="use encoder attentive loss regularization with cost ratio D")
parser.add_argument("--attentive-cost-without-normalize", action='store_true',
help="Don't do normalization during attentive cost computation")
def forward(self, model, sample, reduce=True):
reduction = 'sum' if reduce else 'none'
net_input = sample["net_input"]
net_output = model(**net_input)
attn_cost = None
lprobs = model.get_normalized_probs(net_output, log_probs=True)
        is_dual_input = net_input['src_tokens'] is not None and net_input.get('src_txt_tokens') is not None
target = model.get_targets(sample, net_output)
src_token_num = 0
if is_dual_input:
# lprobs_spch from speech encoder and lprobs_text from text encoder
lprobs_spch, lprobs_text = torch.chunk(lprobs, 2)
lprobs_spch.batch_first = lprobs.batch_first
lprobs_text.batch_first = lprobs.batch_first
speech_loss, speech_nll_loss, speech_correct, speech_total = \
self.guide_loss_and_acc(model, lprobs_spch, lprobs_text, target, reduce=(reduction == 'sum'))
text_loss, text_nll_loss, text_correct, text_total = self.compute_loss_and_acc(model, lprobs_text, target, reduction=reduction)
loss = (speech_loss + text_loss)
nll_loss = (speech_nll_loss + text_nll_loss)
correct = speech_correct + text_correct
total = speech_total + text_total
attn_cost = net_output[1].get('attn_cost')
if attn_cost is not None:
# attn_cost is batch_first and padding tokens have been masked already
src_token_num = attn_cost.ne(0).sum()
attn_cost = attn_cost.sum()
loss = loss + attn_cost * self.attn_beta
else:
attn_cost = 0
else:
loss, nll_loss, correct, total = self.compute_loss_and_acc(model, lprobs, target, reduction=reduction)
if sample["net_input"]['src_tokens'] is None: # text input only
loss = loss * self.text_input_cost_ratio
speech_loss = None
speech_nll_loss = None
sample_size, logging_output = self.get_logging_output(
sample, loss, nll_loss, correct, total, src_token_num, speech_loss, speech_nll_loss, attn_cost, is_dual_input
)
return loss, sample_size, logging_output
def compute_loss_and_acc(self, model, lprobs, target, reduction='sum'):
if not lprobs.batch_first:
lprobs = lprobs.transpose(0, 1)
lprobs = lprobs.view(-1, lprobs.size(-1)) # -> (B x T) x C
target = target.view(-1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=(reduction == 'sum'),
)
mask = target.ne(self.padding_idx)
correct = torch.sum(lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)))
total = torch.sum(mask)
return loss, nll_loss, correct, total
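    # Hedged sketch (names assumed) of the masked accuracy computed above:
    # argmax predictions are compared to the target only at non-pad positions.
    #
    #     mask = target.ne(pad_idx)                       # (N,)
    #     correct = lprobs.argmax(1)[mask].eq(target[mask]).sum()
    #     total = mask.sum()                              # denominator for acc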
def guide_loss_and_acc(self, model, lprobs, lprobs_teacher, target, reduce=True):
""" lprobs_teacher is used as guide for lprobs """
if self.alpha == 0.0 or model.num_updates < self.disable_update_num:
return self.compute_loss_and_acc(model, lprobs, target, reduction=('sum' if reduce else 'none'))
if not lprobs.batch_first:
lprobs = lprobs.transpose(0, 1)
lprobs_teacher = lprobs_teacher.transpose(0, 1)
lprobs = lprobs.view(-1, lprobs.size(-1)).float() # -> (B x T) x C
lprobs_teacher = lprobs_teacher.view(-1, lprobs_teacher.size(-1)).float() # -> (B x T) x C
target = target.view(-1)
loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction='sum' if reduce else 'none')
nll_loss = loss
probs_teacher = lprobs_teacher.exp().masked_fill_(target.unsqueeze(-1).eq(self.padding_idx), 0)
probs_teacher = probs_teacher.detach()
guide_loss = -(probs_teacher*lprobs).sum() if reduce else -(probs_teacher*lprobs).sum(-1, keepdim=True)
loss = self.alpha*guide_loss + (1.0 - self.alpha)*loss
mask = target.ne(self.padding_idx)
correct = torch.sum(lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)))
total = torch.sum(mask)
return loss, nll_loss, correct, total
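    # Hedged sketch of the interpolation above: with alpha = 0.3 the per-token
    # objective mixes a soft-target KD term (teacher probs against student
    # log-probs) with plain NLL on the gold label.
    #
    #     probs_teacher = lprobs_teacher.exp().detach()   # (N, C)
    #     guide = -(probs_teacher * lprobs).sum()
    #     loss = 0.3 * guide + 0.7 * F.nll_loss(lprobs, target, reduction='sum')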
def get_logging_output(
self,
sample,
loss,
nll_loss,
correct,
total,
src_token_num=0,
speech_loss=None,
speech_nll_loss=None,
attn_cost=None,
is_dual_input=False,
):
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
mul_size = 2 if is_dual_input else 1
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"nll_loss": utils.item(nll_loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"]*mul_size,
"nsentences": sample["target"].size(0)*mul_size,
"sample_size": sample_size*mul_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"src_token_num": utils.item(src_token_num.data) if src_token_num > 0 else 0,
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
if speech_loss is not None:
logging_output["speech_loss"] = utils.item(speech_loss.data)
logging_output["speech_nll_loss"] = utils.item(speech_nll_loss.data)
logging_output["sample_size_speech_cost"] = sample_size
logging_output["speech_attn_loss"] = attn_cost
return sample_size*mul_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
src_token_sum = sum(log.get("src_token_num", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
speech_loss_sum = sum(log.get("speech_loss", 0) for log in logging_outputs)
speech_nll_loss_sum = sum(log.get("speech_nll_loss", 0) for log in logging_outputs)
speech_attn_loss_sum = sum(log.get("speech_attn_loss", 0) for log in logging_outputs)
sample_size_speech = sum(log.get("sample_size_speech_cost", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
"nll_loss": nll_loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, and loss
# is per-sentence loss; else sample_size is ntokens, and the loss
# becomes per-output token loss
"speech_loss": speech_loss_sum / sample_size_speech / math.log(2) if sample_size_speech > 0 else 0.0,
"speech_nll_loss": speech_nll_loss_sum / sample_size_speech / math.log(2) if sample_size_speech > 0 else 0.0,
"speech_attn_loss": speech_attn_loss_sum / src_token_sum / math.log(2) if src_token_sum > 0 else 0.0,
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
"src_token_num": src_token_sum,
            # total is the number of valid (non-pad) tokens
}
return agg_output
@classmethod
def reduce_metrics(cls, logging_outputs):
"""Aggregate logging outputs from data parallel training."""
agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
for k, v in agg_logging_outputs.items():
if k in {'nsentences', 'ntokens', 'sample_size'}:
continue
metrics.log_scalar(k, v, round=3)
| 11,004 | 48.129464 | 139 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/criterions/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_text_joint_to_text.criterions." + criterion_name
)
| 487 | 29.5 | 77 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/models/s2t_dualinputtransformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import namedtuple
import torch
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.speech_to_text import (
TransformerDecoder,
S2TTransformerEncoder,
)
from fairseq.models.transformer import TransformerEncoder
from fairseq.modules import (
TransformerEncoderLayer,
GradMultiply,
LayerNorm,
)
logger = logging.getLogger(__name__)
class SpeechEoSEncoder(FairseqEncoder):
def __init__(self, encoder, eos_num, feat_dim, adapter_type="None", adapter_dim=0):
super().__init__(None)
self.encoder = encoder
        self.eos_num = eos_num  # number of learned EOS frames appended to each utterance
self.eos_emb = (
nn.Parameter(torch.zeros(1, feat_dim), requires_grad=True)
if eos_num > 0
else None
)
self.adapter = self.add_adapter(adapter_type, adapter_dim)
def add_adapter(self, adapter_type, adapter_dim):
def _make_identity(linear, eps=1e-5):
assert isinstance(linear, nn.Linear)
linear.weight.data.mul_(eps)
linear.weight.data.fill_diagonal_(1.0)
if linear.bias is not None:
linear.bias.data.mul_(eps)
adapter = None
if adapter_type == "Linear":
assert adapter_dim > 0
adapter = nn.Sequential(
nn.Linear(adapter_dim, adapter_dim), LayerNorm(adapter_dim)
)
# initialize the adapter as identity matrix first
_make_identity(adapter[0])
elif adapter_type == "MLP":
assert adapter_dim > 0
# assume the model is pre-norm model
adapter = nn.Sequential(
nn.Linear(adapter_dim, 2 * adapter_dim),
nn.ReLU(),
nn.Linear(2 * adapter_dim, adapter_dim),
LayerNorm(adapter_dim),
)
_make_identity(adapter[0])
_make_identity(adapter[2])
return adapter
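    # _make_identity above turns a fresh square Linear into an (almost)
    # identity map: weights are shrunk by eps, then the diagonal is set to 1,
    # so the adapter initially passes activations through unchanged. Hedged
    # standalone sketch:
    #
    #     lin = nn.Linear(4, 4)
    #     lin.weight.data.mul_(1e-5)
    #     lin.weight.data.fill_diagonal_(1.0)
    #     lin.bias.data.mul_(1e-5)
    #     x = torch.randn(2, 4)
    #     assert torch.allclose(lin(x), x, atol=1e-3)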
def add_eos(self, src_tokens, src_lengths):
bsz, max_seq_len, fdim = src_tokens.size()
if self.eos_num > 0:
src_token_eos = torch.zeros(
[bsz, max_seq_len + self.eos_num, fdim],
dtype=src_tokens.dtype,
device=src_tokens.device,
)
src_token_eos[:, :max_seq_len] = src_tokens
for bi in range(bsz):
src_token_eos[bi][
src_lengths[bi] : src_lengths[bi] + self.eos_num
] = self.eos_emb.expand(self.eos_num, fdim)
src_lengths = src_lengths + self.eos_num
src_tokens = src_token_eos
return src_tokens, src_lengths
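    # Shape sketch for add_eos (values assumed): each utterance gets eos_num
    # learned EOS frames appended right after its true length, and the length
    # is extended to match.
    #
    #     src_tokens: (B, T, F) -> (B, T + eos_num, F)
    #     src_tokens[b, L_b : L_b + eos_num] = eos_emb    # per item b, length L_b
    #     src_lengths: L_b -> L_b + eos_num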
def apply_adapter(self, enc_out):
if self.adapter is None:
return enc_out
rst = self.adapter(enc_out.encoder_out)
if enc_out.encoder_padding_mask is not None:
rst.masked_fill_(
enc_out.encoder_padding_mask.transpose(0, 1).unsqueeze(-1), 0
)
return EncoderOut(
encoder_out=rst,
encoder_padding_mask=enc_out.encoder_padding_mask,
encoder_embedding=enc_out.encoder_embedding,
encoder_states=enc_out.encoder_states,
src_tokens=enc_out.src_tokens,
src_lengths=enc_out.src_lengths,
)
def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
src_tokens, src_lengths = self.add_eos(src_tokens, src_lengths)
enc_out = self.encoder(src_tokens, src_lengths, return_all_hiddens)
enc_out = self.apply_adapter(enc_out)
return enc_out
def reorder_encoder_out(self, encoder_out, new_order):
return self.encoder.reorder_encoder_out(encoder_out, new_order)
class DualInputEncoder(FairseqEncoder):
def __init__(
self,
args,
spch_encoder,
text_encoder,
dictionary,
cross_attentive_loss_before_last_layer=-1,
):
super().__init__(dictionary)
self.spch_encoder = spch_encoder
self.text_encoder = text_encoder
self.enc_grad_mult = args.enc_grad_mult
self.cross_attentive_loss_before_last_layer = (
cross_attentive_loss_before_last_layer
)
        self.use_cross_attentive_loss = cross_attentive_loss_before_last_layer > -1
self.enc2_along_grad_mult = args.enc2_along_grad_mult
@classmethod
def set_shared_layer(cls, share_level, src_layer, tgt_layer):
"""
share parameters from tgt_layer to src_layer
share_level:
            0: share everything
            1: share all sublayers, but keep separate layer objects
            2: share weights, but not biases or layer norms
"""
if share_level == 0:
return tgt_layer
if isinstance(src_layer, nn.Linear):
return tgt_layer
if isinstance(src_layer, TransformerEncoderLayer):
assert src_layer.embed_dim == tgt_layer.embed_dim
assert src_layer.normalize_before == tgt_layer.normalize_before
if share_level == 1:
src_layer.fc1 = tgt_layer.fc1
src_layer.fc2 = tgt_layer.fc2
src_layer.self_attn = tgt_layer.self_attn
src_layer.final_layer_norm = tgt_layer.final_layer_norm
src_layer.self_attn_layer_norm = tgt_layer.self_attn_layer_norm
src_layer.layernorm_embedding = tgt_layer.layernorm_embedding
else:
src_layer.fc1.weight = tgt_layer.fc1.weight
src_layer.fc2.weight = tgt_layer.fc2.weight
src_layer.self_attn.k_proj.weight = tgt_layer.self_attn.k_proj.weight
src_layer.self_attn.v_proj.weight = tgt_layer.self_attn.v_proj.weight
src_layer.self_attn.q_proj.weight = tgt_layer.self_attn.q_proj.weight
src_layer.self_attn.out_proj.weight = (
tgt_layer.self_attn.out_proj.weight
)
else:
if share_level == 1:
return tgt_layer
return src_layer
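    # Level-2 sharing above ties only the weight tensors; each module keeps
    # its own bias and layer norms. Assigning the Parameter object itself (not
    # a copy) is what makes the tie real. Hedged standalone sketch:
    #
    #     a, b = nn.Linear(4, 4), nn.Linear(4, 4)
    #     a.weight = b.weight                            # one Parameter, two biases
    #     assert a.weight.data_ptr() == b.weight.data_ptr()
    #     assert a.bias.data_ptr() != b.bias.data_ptr()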
@classmethod
def build_spch_encoder(cls, args):
cfg = {
"input_feat_per_channel": args.input_feat_per_channel,
"input_channels": args.input_channels,
"conv_kernel_sizes": args.conv_kernel_sizes,
"conv_channels": args.conv_channels,
"encoder_embed_dim": args.encoder_embed_dim,
"encoder_ffn_embed_dim": args.encoder_ffn_embed_dim,
"encoder_layers": args.speech_encoder_layers,
"encoder_layerdrop": args.encoder_layerdrop,
"encoder_attention_heads": args.encoder_attention_heads,
"max_source_positions": args.max_source_positions,
"dropout": args.dropout,
"encoder_normalize_before": args.encoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"activation_fn": args.activation_fn,
"layernorm_embedding": args.layernorm_embedding,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
"no_scale_embedding": args.no_scale_embedding,
"quant_noise_pq": args.quant_noise_pq,
"encoder_freezing_updates": 0,
}
model_args = namedtuple("args", cfg.keys())(*cfg.values())
spch_encoder = S2TTransformerEncoder(model_args)
if args.add_speech_eos:
spch_encoder = SpeechEoSEncoder(
spch_encoder,
2 * len(args.conv_kernel_sizes.split(",")),
args.input_feat_per_channel,
adapter_type=getattr(args, "speech_encoder_adapter_type", "None"),
adapter_dim=args.encoder_embed_dim,
)
return spch_encoder
@classmethod
def build_text_encoder(cls, args, src_dictionary, spch_encoder):
if args.encoder_shared_layers > 0:
mx_shared_layers = (
args.speech_encoder_layers
if args.speech_encoder_layers < args.text_encoder_layers
else args.text_encoder_layers
)
args.encoder_shared_layers = (
args.encoder_shared_layers
if args.encoder_shared_layers <= mx_shared_layers
else mx_shared_layers
)
cfg = {
"encoder_embed_dim": args.encoder_text_embed_dim,
"encoder_ffn_embed_dim": args.encoder_ffn_embed_dim,
"encoder_layers": args.text_encoder_layers,
"encoder_layerdrop": args.encoder_layerdrop,
"encoder_attention_heads": args.encoder_attention_heads,
"encoder_learned_pos": args.encoder_learned_pos,
"max_source_positions": args.max_source_positions,
"dropout": args.dropout,
"encoder_normalize_before": args.encoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"activation_fn": args.activation_fn,
"adaptive_input": args.adaptive_input,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
"no_scale_embedding": args.no_scale_embedding,
"quant_noise_pq": args.quant_noise_pq,
}
model_args = namedtuple("args", cfg.keys())(*cfg.values())
enc_emb = nn.Embedding(
len(src_dictionary), model_args.encoder_embed_dim, src_dictionary.pad()
)
text_encoder = TransformerEncoder(model_args, src_dictionary, enc_emb)
if args.add_speech_eos:
spch_encoder = spch_encoder.encoder
if args.encoder_shared_layers > 0:
text_encoder.layer_norm = cls.set_shared_layer(
args.encoder_shared_layer_level,
text_encoder.layer_norm,
spch_encoder.layer_norm,
)
for i, ly in enumerate(
spch_encoder.transformer_layers[-args.encoder_shared_layers :]
):
ly_id = i + args.text_encoder_layers - args.encoder_shared_layers
if not isinstance(text_encoder.layers[ly_id], type(ly)):
if text_encoder.layers[ly_id]._get_name() not in ('TransformerEncoderLayerBase', 'TransformerEncoderLayer'):
raise ValueError("The shared layers are expected from the same class")
text_encoder.layers[ly_id] = cls.set_shared_layer(
args.encoder_shared_layer_level,
text_encoder.layers[ly_id],
ly,
)
return text_encoder
def mult_rst_grad(self, rst, ratio):
assert isinstance(rst, dict) # instead of EncoderOut
assert len(rst["encoder_out"]) == 1
rst["encoder_out"][0] = GradMultiply.apply(rst["encoder_out"][0], ratio)
return rst
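    # GradMultiply leaves the forward value unchanged and only scales the
    # gradient flowing back into its input, so a ratio < 1 damps the encoder
    # updates without touching the loss value. Hedged sketch:
    #
    #     x = torch.ones(3, requires_grad=True)
    #     y = GradMultiply.apply(x, 0.1).sum()
    #     y.backward()                                   # x.grad == [0.1, 0.1, 0.1]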
def process_attentive_loss_states(self, rst, interstates):
assert isinstance(rst, dict) # instead of EncoderOut
rst["encoder_states"] = interstates
return rst
def forward(
self,
src_tokens,
src_lengths=None,
src_txt_tokens=None,
src_txt_lengths=None,
**kwargs
):
"""
Args:
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (speech) (B,)
src_txt_tokens: padded tensor (B, T)
src_txt_lengths: tensor of original lengths of input utterances (text) (B,)
"""
# src_tokens only: inference
# src_tokens, src_lengths: speech only training
# src_txt_tokens, src_txt_lengths: text only training
# all valid: speech + text training
if src_tokens is None and src_txt_tokens is None:
raise ValueError(
"src_tokens and src_txt_tokens cannot be None at the same time"
)
ret1 = None
ret2 = None
return_all_hiddens = False
if src_tokens is not None:
if (
self.use_cross_attentive_loss and src_txt_tokens is not None
            ):  # no self.training check, so attn scores are also computed during validation
return_all_hiddens = True
ret1 = self.spch_encoder(
src_tokens, src_lengths, return_all_hiddens=return_all_hiddens
)
if self.use_cross_attentive_loss and src_txt_tokens is not None:
assert self.cross_attentive_loss_before_last_layer < len(
ret1["encoder_states"]
)
ret1 = self.process_attentive_loss_states(
ret1,
ret1["encoder_states"][
-self.cross_attentive_loss_before_last_layer - 1
],
)
if src_txt_tokens is not None:
ret2 = self.text_encoder(
src_txt_tokens, src_txt_lengths, return_all_hiddens=return_all_hiddens
)
if return_all_hiddens:
if self.cross_attentive_loss_before_last_layer == len(
self.text_encoder.layers
):
text_embedding, _ = self.text_encoder.forward_embedding(
src_txt_tokens
)
text_embedding = text_embedding.transpose(0, 1)
ret2 = self.process_attentive_loss_states(ret2, text_embedding)
else:
assert self.cross_attentive_loss_before_last_layer < len(
self.text_encoder.layers
)
ret2 = self.process_attentive_loss_states(
ret2,
ret2["encoder_states"][
-self.cross_attentive_loss_before_last_layer - 1
],
)
def merge_output(rst1, rst2):
if rst1 is None:
if not (self.enc2_along_grad_mult == 1.0 or self.training):
rst2 = self.mult_rst_grad(rst2, self.enc2_along_grad_mult)
return rst2
if rst2 is None:
return rst1
if self.enc_grad_mult != 1.0 and self.training:
rst1 = self.mult_rst_grad(rst1, self.enc_grad_mult)
rst2 = self.mult_rst_grad(rst2, self.enc_grad_mult)
rst = (rst1, rst2)
return rst
return merge_output(ret1, ret2)
def reorder_encoder_out(self, encoder_out, new_order):
assert self.training is False # used for inference only
return self.spch_encoder.reorder_encoder_out(encoder_out, new_order)
# TransformerMultiInputDecoder: take one or two encoder inputs
class TransformerMultiInputDecoder(FairseqDecoder):
def __init__(
self,
dictionary,
spch_decoder,
text_decoder,
compute_cross_attentive_loss=False,
cross_attentive_loss_with_norm=True,
cross_attentive_loss_reverse=False,
):
super().__init__(dictionary)
self.spch_decoder = spch_decoder
self.text_decoder = text_decoder
self.compute_cross_attentive_loss = compute_cross_attentive_loss
self.cross_attentive_loss_with_norm = cross_attentive_loss_with_norm
self.cross_attentive_loss_reverse = cross_attentive_loss_reverse
@classmethod
def share_spchdecoder(cls, task_args, text_decoder, spch_decoder):
if task_args.decoder_shared_layer_level == 0:
return text_decoder
assert text_decoder.embed_tokens == spch_decoder.embed_tokens
spch_decoder.project_in_dim = text_decoder.project_in_dim
spch_decoder.embed_positions = text_decoder.embed_positions
spch_decoder.layernorm_embedding = text_decoder.layernorm_embedding
spch_decoder.project_out_dim = text_decoder.project_out_dim
spch_decoder.adaptive_softmax = text_decoder.adaptive_softmax
if task_args.decoder_shared_layer_level == 1:
spch_decoder.output_projection = text_decoder.output_projection
spch_decoder.layer_norm = text_decoder.layer_norm
else: # 2
spch_decoder.output_projection.weight = (
text_decoder.output_projection.weight
)
for i, ly in enumerate(text_decoder.layers):
sly = spch_decoder.layers[i]
sly.self_attn = ly.self_attn
sly.self_attn_layer_norm = ly.self_attn_layer_norm
# sly.encoder_attn = ly.encoder_attn
if (
task_args.decoder_shared_layer_level == 1
): # share everything, but under different models
sly.encoder_attn = ly.encoder_attn
sly.encoder_attn_layer_norm = ly.encoder_attn_layer_norm
sly.fc1 = ly.fc1
sly.fc2 = ly.fc2
sly.final_layer_norm = ly.final_layer_norm
            else:  # decoder_shared_layer_level == 2: separate encoder_attn_layer_norm and bias
sly.encoder_attn.k_proj.weight = ly.encoder_attn.k_proj.weight
sly.encoder_attn.v_proj.weight = ly.encoder_attn.v_proj.weight
sly.encoder_attn.q_proj.weight = ly.encoder_attn.q_proj.weight
sly.encoder_attn.out_proj.weight = ly.encoder_attn.out_proj.weight
sly.fc1.weight = ly.fc1.weight
sly.fc2.weight = ly.fc2.weight
return spch_decoder
def cross_attentive_loss(
self, teacher_states, student_states, teacher_masking, student_masking, eps=1e-6
):
x = teacher_states.transpose(0, 1) # from T X B X D to B X T X D
y = student_states.transpose(0, 1)
if self.cross_attentive_loss_with_norm:
x = x / (x.norm(dim=2, keepdim=True) + eps)
y = y / (y.norm(dim=2, keepdim=True) + eps)
dim = x.size(-1)
# lengths: batch X seqLen
        sim_scores_xy = torch.bmm(x, y.transpose(1, 2))  # batch X lenx X leny
if y.dtype == torch.float16:
sim_scores_xy = sim_scores_xy.float()
y = y.float()
x = x.float()
if teacher_masking != []:
assert len(teacher_masking) == 1
sim_scores_xy = sim_scores_xy.masked_fill(
teacher_masking[0].unsqueeze(-1), float("-inf")
)
if student_masking != []:
sim_scores_xy = sim_scores_xy.masked_fill(
student_masking[0].unsqueeze(1), float("-inf")
)
# do masking
y_weights = utils.softmax(sim_scores_xy, dim=-1)
if teacher_masking != []:
y_weights = y_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0)
x_reconstruct_from_y = torch.bmm(y_weights, y)
        sim_scores_xx = torch.bmm(x, x.transpose(1, 2))  # batch X lenx X lenx
x_weights = utils.softmax(sim_scores_xx, dim=-1)
if teacher_masking != []:
x_weights = x_weights.masked_fill(teacher_masking[0].unsqueeze(-1), 0)
# no gradient for teacher state
x_reconstruct_from_x = torch.bmm(x_weights, x).detach()
cost = (x_reconstruct_from_x - x_reconstruct_from_y).norm(dim=2)
if teacher_masking != []:
cost = cost.masked_fill(teacher_masking[0], 0)
if not self.cross_attentive_loss_with_norm:
cost = cost / dim
return cost
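    # What the cost above measures (hedged shape sketch, not extra model
    # code): the teacher states x are reconstructed twice, once by attending
    # over x itself (detached) and once by attending over the student states
    # y; when y is well aligned with x, both attention maps select the same
    # content and the per-position L2 gap shrinks.
    #
    #     x: (B, Tx, D) teacher, y: (B, Ty, D) student, both L2-normalized
    #     y_weights = softmax(x @ y^T)                   # (B, Tx, Ty)
    #     x_from_y  = y_weights @ y                      # (B, Tx, D)
    #     x_weights = softmax(x @ x^T)
    #     x_from_x  = (x_weights @ x).detach()           # no teacher gradient
    #     cost      = ||x_from_x - x_from_y|| over D     # (B, Tx)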
def forward(
self,
prev_output_tokens,
encoder_out,
incremental_state=None,
has_txt_input=False,
**kwargs
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing. If there are
                two or more inputs during training, they share the same prev_output_tokens
encoder_out (tuple[Tensor]): output from the encoder, used for
                encoder-side attention. It is a tuple if there are multiple inputs,
                and a tensor if there is only one input
incremental_state ([dict]): dictionary used for storing state during
                :ref:`Incremental decoding`. It is only valid for inference with a
                single input
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
                  vocab)`. If there are N inputs, the batch dimension is N times that of a single input
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
assert not isinstance(encoder_out, EncoderOut)
        if isinstance(encoder_out, tuple):  # training with multiple inputs
rst = []
assert len(encoder_out) == 2
for i, eo in enumerate(encoder_out):
assert incremental_state is None
if i == 0:
rst.append(
self.spch_decoder(prev_output_tokens, eo, incremental_state)
)
else:
rst.append(
self.text_decoder(prev_output_tokens, eo, incremental_state)
)
dec_out = torch.cat([r[0] for r in rst], dim=0)
attn_cost = None
if self.compute_cross_attentive_loss:
assert isinstance(encoder_out[0], dict)
if self.cross_attentive_loss_reverse:
attn_cost = self.cross_attentive_loss(
teacher_states=encoder_out[1]["encoder_states"], # text_states
student_states=encoder_out[0]["encoder_states"], # spch_states
teacher_masking=encoder_out[1]["encoder_padding_mask"],
student_masking=encoder_out[0]["encoder_padding_mask"],
)
else:
attn_cost = self.cross_attentive_loss(
teacher_states=encoder_out[0]["encoder_states"], # spch_states
student_states=encoder_out[1]["encoder_states"], # text_states
teacher_masking=encoder_out[0]["encoder_padding_mask"],
student_masking=encoder_out[1]["encoder_padding_mask"],
)
return (dec_out, {"attn_cost": attn_cost})
else: # inference or training with one input
if has_txt_input:
return self.text_decoder(
prev_output_tokens, encoder_out, incremental_state
)
return self.spch_decoder(prev_output_tokens, encoder_out, incremental_state)
# Note:
# dual input transformer:
# encoder: S2TTransformerEncoder for speech + TransformerEncoder for text
# decoder: TransformerDecoder for text
@register_model("dual_input_s2t_transformer")
class DualInputS2TTransformerModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.num_updates = 0
def max_positions(self):
return None # it is provided in task
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# encoder 1: S2TTransformerEncoder for speech
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
        encoder output dimension, can be None. If specified, the transformer
        output is projected to the specified dimension""",
)
# standard Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-text-embed-dim",
type=int,
metavar="N",
help="encoder text embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
# non-standard transformer parameters
parser.add_argument(
"--speech-encoder-layers",
type=int,
metavar="N",
help="num speech encoder layers",
)
parser.add_argument(
"--text-encoder-layers",
type=int,
metavar="N",
help="num text encoder layers",
)
parser.add_argument(
"--encoder-shared-layers",
type=int,
metavar="N",
help="num shared encoder layers",
)
parser.add_argument(
"--encoder-shared-layer-level",
type=int,
metavar="N",
default=0,
choices=[0, 1, 2],
help="share layer level 0: all share 1: all share with separate model 2: share weight but not bias and layernorm",
)
parser.add_argument(
"--decoder-shared-layer-level",
default=0,
choices=[0, 1, 2],
type=int,
metavar="N",
help="0: share everything; 1: share everything with different model 2: no share layer_norm and bias",
)
###
parser.add_argument(
"--text-input-cost-ratio",
type=float,
default=1.0,
metavar="V",
help="text input cost ratio relative to speech input cost",
)
parser.add_argument(
"--init-scale",
type=float,
default=1.0,
metavar="V",
help="scale the initial weight by given factor",
)
parser.add_argument(
"--enc-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc1 and enc2 gradient by V",
)
parser.add_argument(
"--enc2-along-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc2 gradient by V if only enc2 is used",
)
parser.add_argument(
"--load-pretrain-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained encoder """,
)
parser.add_argument(
"--load-pretrain-speech-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained speech encoder """,
)
parser.add_argument(
"--load-pretrain-text-encoder",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained text encoder """,
)
parser.add_argument(
"--load-pretrain-text-encoder-last",
type=str,
default="",
metavar="EXPR",
help=""" path to the pretrained text encoder """,
)
parser.add_argument(
"--load-pretrain-decoder",
type=str,
metavar="EXPR",
default="",
help=""" path to the pretrained encoder """,
)
parser.add_argument(
"--add-speech-eos",
action="store_true",
help="add eos token at the end of input feature",
)
parser.add_argument(
"--speech-encoder-adapter-type",
type=str,
metavar="EXPR",
default="None",
choices=["None", "Linear", "MLP"],
help="add speech encoder adapter",
)
@classmethod
def build_encoder(cls, args, task):
spch_encoder = DualInputEncoder.build_spch_encoder(args)
text_encoder = DualInputEncoder.build_text_encoder(
args, task.src_dict, spch_encoder
)
cross_attentive_loss_before_last_layer = (
0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1
)
encoder = DualInputEncoder(
args,
spch_encoder,
text_encoder,
task.src_dict,
cross_attentive_loss_before_last_layer,
)
if args.init_scale != 1.0:
with torch.no_grad():
for param in encoder.parameters():
param.data.mul_(args.init_scale)
if args.load_pretrain_text_encoder != "":
checkpoint_utils.load_pretrained_component_from_model(
text_encoder, args.load_pretrain_text_encoder
)
if args.load_pretrain_speech_encoder != "":
if hasattr(spch_encoder, "encoder"):
checkpoint_utils.load_pretrained_component_from_model(
spch_encoder.encoder, args.load_pretrain_speech_encoder
)
else:
checkpoint_utils.load_pretrained_component_from_model(
spch_encoder, args.load_pretrain_speech_encoder
)
if (
args.load_pretrain_text_encoder_last != ""
        ):  # if the encoder is shared, the speech encoder parameters are used;
            # this provides a chance to load a pre-trained MT encoder instead
checkpoint_utils.load_pretrained_component_from_model(
text_encoder, args.load_pretrain_text_encoder_last
)
if args.load_pretrain_encoder != "":
checkpoint_utils.load_pretrained_component_from_model(
encoder, args.load_pretrain_encoder
)
return encoder
@classmethod
def build_decoder(cls, args, task):
dec_cfg = {
"decoder_layerdrop": args.decoder_layerdrop,
"share_decoder_input_output_embed": args.share_decoder_input_output_embed,
"decoder_embed_dim": args.decoder_embed_dim,
"max_target_positions": args.max_target_positions,
"dropout": args.dropout,
"encoder_learned_pos": args.encoder_learned_pos,
"decoder_learned_pos": args.decoder_learned_pos,
"layernorm_embedding": args.layernorm_embedding,
"decoder_normalize_before": args.decoder_normalize_before,
"activation_dropout": args.activation_dropout,
"attention_dropout": args.attention_dropout,
"decoder_ffn_embed_dim": args.decoder_ffn_embed_dim,
"decoder_layers": args.decoder_layers,
"decoder_attention_heads": args.decoder_attention_heads,
"decoder_output_dim": args.decoder_embed_dim,
"no_scale_embedding": args.no_scale_embedding,
"adaptive_input": args.adaptive_input,
"quant_noise_pq": args.quant_noise_pq,
"adaptive_softmax_cutoff": args.adaptive_softmax_cutoff,
"tie_adaptive_weights": args.tie_adaptive_weights,
"no_token_positional_embeddings": args.no_token_positional_embeddings,
"encoder": {"embed_dim":args.encoder_embed_dim}
}
dec_cfg = namedtuple("args", dec_cfg.keys())(*dec_cfg.values())
dec_emb = nn.Embedding(
len(task.target_dictionary),
args.decoder_embed_dim,
task.target_dictionary.pad(),
)
        compute_cross_attentive_loss = (
            getattr(args, "attentive_cost_regularization", 0.0) > 0.0
        )
cross_attentive_loss_without_norm = getattr(
args, "attentive_cost_without_normalize", False
)
cross_attentive_loss_reverse = (
False # getattr(args, "attentive_cost_reverse", False)
)
text_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb)
spch_decoder = TransformerDecoder(dec_cfg, task.target_dictionary, dec_emb)
spch_decoder = TransformerMultiInputDecoder.share_spchdecoder(
args, text_decoder, spch_decoder
)
decoder = TransformerMultiInputDecoder(
dictionary=task.target_dictionary,
spch_decoder=spch_decoder,
text_decoder=text_decoder,
compute_cross_attentive_loss=compute_cross_attentive_loss,
            cross_attentive_loss_with_norm=not cross_attentive_loss_without_norm,
cross_attentive_loss_reverse=cross_attentive_loss_reverse,
)
if args.init_scale != 1.0:
with torch.no_grad():
for param in decoder.parameters():
param.data.mul_(args.init_scale)
if args.load_pretrain_decoder != "":
try:
checkpoint_utils.load_pretrained_component_from_model(
decoder, args.load_pretrain_decoder
)
except RuntimeError:
checkpoint_utils.load_pretrained_component_from_model(
decoder.text_decoder, args.load_pretrain_decoder
)
if args.decoder_shared_layer_level > 0:
checkpoint_utils.load_pretrained_component_from_model(
decoder.spch_decoder, args.load_pretrain_decoder
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
dualinputs2ttransformer_base(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
use_encoder_outputs=False,
src_txt_tokens=None,
src_txt_lengths=None,
mode="sup_speech",
**kwargs
):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
            mode (str): 'sup_speech' or 'text'
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
if mode == "text":
assert src_txt_tokens is None
src_txt_tokens = src_tokens
src_txt_lengths = src_lengths
src_tokens = None
src_lengths = None
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
src_txt_tokens=src_txt_tokens,
src_txt_lengths=src_txt_lengths,
**kwargs
)
        has_txt_input = src_txt_tokens is not None
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
has_txt_input=has_txt_input,
**kwargs
)
if use_encoder_outputs:
return decoder_out, encoder_out
return decoder_out
@register_model_architecture(
"dual_input_s2t_transformer", "dualinputs2ttransformer_base"
)
def dualinputs2ttransformer_base(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_text_embed_dim = getattr(
args, "encoder_text_embed_dim", args.encoder_embed_dim
)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.encoder_shared_layers = getattr(args, "encoder_shared_layers", 0)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.add_speech_eos = getattr(args, "add_speech_eos", False)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_s")
def dualinputs2ttransformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 7)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 7)
args.decoder_layers = getattr(args, "decoder_layers", 7)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_m")
def dualinputs2ttransformer_m(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.15)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 10)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_b")
def dualinputs2ttransformer_b(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.dropout = getattr(args, "dropout", 0.15)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
@register_model_architecture("dual_input_s2t_transformer", "dualinputs2ttransformer_l")
def dualinputs2ttransformer_l(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.2)
args.speech_encoder_layers = getattr(args, "speech_encoder_layers", 12)
args.text_encoder_layers = getattr(args, "text_encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
dualinputs2ttransformer_base(args)
| 45,047 | 40.177331 | 128 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/models/s2t_dualinputxmtransformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
register_model,
register_model_architecture,
FairseqEncoder,
)
from fairseq.models.speech_to_text import Wav2VecEncoderWithAdaptor
from fairseq.models.speech_to_text.xm_transformer import (
set_default_adaptor_args,
set_default_w2v_encoder_args,
need_finetuning
)
from fairseq.models.transformer import TransformerEncoder, TransformerDecoder
from fairseq.models.wav2vec import TransformerSentenceEncoderLayer
from fairseq.utils import safe_hasattr
from .s2t_dualinputtransformer import (
DualInputS2TTransformerModel,
TransformerMultiInputDecoder,
DualInputEncoder,
)
class TransformerSentenceEncoderLayerStd(TransformerSentenceEncoderLayer):
def __init__(self, sent_enc_layer):
        super(TransformerSentenceEncoderLayer, self).__init__()  # skip TransformerSentenceEncoderLayer.__init__; run nn.Module.__init__ only
self.embedding_dim = sent_enc_layer.embedding_dim
self.dropout = sent_enc_layer.dropout
self.activation_dropout = sent_enc_layer.activation_dropout
# Initialize blocks
self.activation_fn = sent_enc_layer.activation_fn
self.self_attn = sent_enc_layer.self_attn
self.dropout1 = sent_enc_layer.dropout1
self.dropout2 = sent_enc_layer.dropout2
self.dropout3 = sent_enc_layer.dropout3
self.layer_norm_first = sent_enc_layer.layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = sent_enc_layer.self_attn_layer_norm
self.fc1 = sent_enc_layer.fc1
self.fc2 = sent_enc_layer.fc2
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = sent_enc_layer.final_layer_norm
def forward(
self,
x,
self_attn_mask=None,
self_attn_padding_mask=None,
need_weights=None,
att_args=None,
):
x, attn = super().forward(
x, self_attn_mask, self_attn_padding_mask, need_weights, att_args
)
return x
# TODO retire SharedEncoder
class SharedEncoder(FairseqEncoder):
def __init__(self, wav2vec_enc, mbart_enc, adaptor, shared_layers):
super().__init__(None)
self.w2v_encoder = wav2vec_enc
self.shared_layers = self.w2v_encoder.w2v_model.encoder.layers[-shared_layers:]
self.w2v_encoder.w2v_model.encoder.layers = (
self.w2v_encoder.w2v_model.encoder.layers[:-shared_layers]
)
self.adaptor = adaptor
if self.shared_layers[-1].layer_norm_first:
self.final_layer_norm = mbart_enc.layer_norm
else:
mbart_enc.layer_norm = None
self.final_layer_norm = None
shared_layer_from = len(mbart_enc.layers) - shared_layers
if shared_layer_from < 0:
shared_layer_from = 0
for layer_id, layer in enumerate(self.shared_layers):
mbart_enc.layers[
shared_layer_from + layer_id
] = TransformerSentenceEncoderLayerStd(layer)
def forward(self, src_tokens, src_lengths=None, **kwargs):
padding_mask = lengths_to_padding_mask(src_lengths)
if not padding_mask.any():
padding_mask = None
out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)
x = out["encoder_out"]
enc_padding_mask = None
if out["encoder_padding_mask"] is not None:
enc_padding_mask = out["encoder_padding_mask"].transpose(
0, 1
) # T X B --> B X T
x, enc_padding_mask = self.adaptor(x, enc_padding_mask)
for layer in self.shared_layers:
x, _ = layer(x, enc_padding_mask)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [enc_padding_mask]
if enc_padding_mask is not None
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
class StackedWav2VecEncoderWithAdaptor(FairseqEncoder):
def __init__(
self,
wav2vec_enc,
mbart_enc_layers,
mbart_layer_norm,
adaptor,
drop_w2v_layers=0,
):
super().__init__(None)
self.w2v_encoder = wav2vec_enc
self.adaptor = adaptor
self.mbart_encoder_layers = mbart_enc_layers
self.final_layer_norm = mbart_layer_norm
if drop_w2v_layers > 0:
self.w2v_encoder.w2v_model.encoder.layers = (
self.w2v_encoder.w2v_model.encoder.layers[:-drop_w2v_layers]
)
def forward(self, src_tokens, src_lengths=None, return_all_hiddens=False, **kwargs):
padding_mask = lengths_to_padding_mask(src_lengths)
if not padding_mask.any():
padding_mask = None
out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)
x = out["encoder_out"]
enc_padding_mask = None
if out["padding_mask"] is not None:
enc_padding_mask = out["padding_mask"] # B X T
x, enc_padding_mask = self.adaptor(x, enc_padding_mask)
encoder_states = []
for layer in self.mbart_encoder_layers:
x = layer(x, enc_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.final_layer_norm is not None:
x = self.final_layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [enc_padding_mask]
if enc_padding_mask is not None
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
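    # This hook is what lets beam search permute or duplicate batch entries
    # between decoding steps: every cached tensor is re-indexed along its
    # batch dimension (dim 1 for T x B x C tensors, dim 0 for B x T masks).
    # Hedged sketch:
    #
    #     new_order = torch.tensor([1, 1, 0])            # beam duplicates item 1
    #     enc = enc.index_select(1, new_order)           # T x B x C
    #     pad_mask = pad_mask.index_select(0, new_order) # B x T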
# Note:
# dual input transformer:
# encoder: wav2vec for speech + mbart encoder for text
# decoder: mbart decoder for text
@register_model("dual_input_xm_transformer")
class DualInputXMTransformerModel(DualInputS2TTransformerModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# wav2vec encoder
Wav2VecEncoderWithAdaptor.add_args(parser)
# add_decoder_args(parser)
# mbart Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--mbart-dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--mbart-attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--mbart-activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-mbart-from",
type=str,
metavar="STR",
help="model to take text encoder decoder weights from (for initialization)",
)
# parser.add_argument("--finetune-w2v-params", type=str, metavar="STR",
# help="comma-separated param strings to finetune.")
parser.add_argument(
"--finetune-mbart-decoder-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
parser.add_argument(
"--finetune-mbart-encoder-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
parser.add_argument(
"--skip-encoder-projection",
action="store_true",
help="skip the projection layer in encoder",
)
parser.add_argument(
"--enc-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc1 and enc2 gradient by V",
)
parser.add_argument(
"--enc2-along-grad-mult",
type=float,
metavar="V",
default=1.0,
help="multiply enc2 gradient by V if only enc2 is used",
)
parser.add_argument(
"--text-input-cost-ratio",
type=float,
default=1.0,
metavar="V",
help="text input cost ratio relative to speech input cost",
)
parser.add_argument(
"--stack-w2v-mbart-encoder",
action="store_true",
help="stack w2v and mbart encoder",
)
parser.add_argument(
"--stack-w2v-mbart-nonorm-encoder",
action="store_true",
help="stack w2v and mbart encoder",
)
parser.add_argument(
"--no-final-norm-decoder", action="store_true", help="no layer norm"
)
parser.add_argument(
"--drop-w2v-layers",
type=int,
default=0,
metavar="N",
help="drop w2v encoder layers",
)
parser.add_argument(
"--share-w2v-text-encoder",
action="store_true",
help="share w2v encoder layers with text encoder",
)
parser.add_argument(
"--shared-w2v-layers",
type=int,
default=0,
metavar="N",
help="shared encoder layers from w2v encoder",
)
@classmethod
def build_encoder(cls, args, task):
_args = copy.deepcopy(args)
_args.dropout = args.mbart_dropout
_args.attention_dropout = args.mbart_attention_dropout
_args.activation_dropout = args.mbart_activation_dropout
_args.max_source_positions = 1024
enc_emb = nn.Embedding(
len(task.src_dict), _args.encoder_embed_dim, task.src_dict.pad()
)
text_encoder = TransformerEncoder(_args, task.src_dict, enc_emb)
spch_encoder = Wav2VecEncoderWithAdaptor(args)
if getattr(args, "load_pretrained_mbart_from", None):
text_encoder = checkpoint_utils.load_pretrained_component_from_model(
component=text_encoder, checkpoint=args.load_pretrained_mbart_from
)
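        # optionally stack or share the mbart encoder layers with the wav2vec encoder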
if getattr(args, "stack_w2v_mbart_encoder", False):
assert getattr(args, "share_w2v_text_encoder", False) is False
spch_encoder = StackedWav2VecEncoderWithAdaptor(
spch_encoder.w2v_encoder,
text_encoder.layers,
text_encoder.layer_norm,
spch_encoder.adaptor,
args.drop_w2v_layers,
)
elif getattr(args, "stack_w2v_mbart_nonorm_encoder", False):
text_encoder.layer_norm = None
spch_encoder = StackedWav2VecEncoderWithAdaptor(
spch_encoder.w2v_encoder,
text_encoder.layers,
text_encoder.layer_norm,
spch_encoder.adaptor,
args.drop_w2v_layers,
)
elif getattr(args, "share_w2v_text_encoder", False):
spch_encoder = SharedEncoder(
spch_encoder.w2v_encoder,
text_encoder,
spch_encoder.adaptor,
args.shared_w2v_layers,
)
for k, p in spch_encoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_w2v_params"
) and need_finetuning(args.finetune_w2v_params, k):
p.requires_grad = True
else:
p.requires_grad = False
for k, p in text_encoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_mbart_encoder_params"
) and need_finetuning(
args.finetune_mbart_encoder_params, k
):
p.requires_grad = True
else:
p.requires_grad = False
cross_attentive_loss_before_last_layer = (
0 if getattr(args, "attentive_cost_regularization", 0.0) > 0.0 else -1
)
encoder = DualInputEncoder(
args,
spch_encoder,
text_encoder,
task.src_dict,
cross_attentive_loss_before_last_layer,
)
return encoder
@classmethod
def build_decoder(cls, args, task):
_args = copy.deepcopy(args)
_args.dropout = args.mbart_dropout
_args.attention_dropout = args.mbart_attention_dropout
_args.activation_dropout = args.mbart_activation_dropout
_args.max_target_positions = 1024
dec_emb = nn.Embedding(
len(task.tgt_dict), _args.encoder_embed_dim, task.tgt_dict.pad()
)
decoder = TransformerDecoder(_args, task.tgt_dict, dec_emb)
if getattr(args, "load_pretrained_mbart_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_mbart_from
)
if getattr(args, "no_final_norm_decoder", False):
decoder.layer_norm = None
for k, p in decoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_mbart_decoder_params"
) and need_finetuning(
args.finetune_mbart_decoder_params, k
):
p.requires_grad = True
else:
p.requires_grad = False
        compute_cross_attentive_loss = (
            getattr(args, "attentive_cost_regularization", 0.0) > 0.0
        )
cross_attentive_loss_without_norm = getattr(
args, "attentive_cost_without_normalize", False
)
cross_attentive_loss_reverse = (
False # getattr(args, "attentive_cost_reverse", False)
)
decoder = TransformerMultiInputDecoder(
dictionary=task.target_dictionary,
spch_decoder=decoder,
text_decoder=decoder,
compute_cross_attentive_loss=compute_cross_attentive_loss,
            cross_attentive_loss_with_norm=not cross_attentive_loss_without_norm,
cross_attentive_loss_reverse=cross_attentive_loss_reverse,
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
dualinputxmtransformer_base(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
@register_model_architecture("dual_input_xm_transformer", "dualinputxmtransformer_base")
def dualinputxmtransformer_base(args):
# wav2vec encoder
set_default_w2v_encoder_args(args)
set_default_adaptor_args(args)
# mbart model
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(
args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim
)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.mbart_attention_dropout = getattr(args, "mbart_attention_dropout", 0.0)
args.mbart_activation_dropout = getattr(args, "mbart_activation_dropout", 0.0)
args.mbart_dropout = getattr(args, "mbart_dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
| 21,461 | 35.687179 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
| 206 | 22 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/scripts/g2p_encode.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import itertools
import logging
import re
import time
from g2p_en import G2p
logger = logging.getLogger(__name__)
FAIL_SENT = "FAILED_SENTENCE"
def parse():
parser = argparse.ArgumentParser()
parser.add_argument("--data-path", type=str, required=True)
parser.add_argument("--out-path", type=str, required=True)
parser.add_argument("--lower-case", action="store_true")
parser.add_argument("--do-filter", action="store_true")
parser.add_argument("--use-word-start", action="store_true")
parser.add_argument("--dup-vowel", default=1, type=int)
parser.add_argument("--dup-consonant", default=1, type=int)
parser.add_argument("--no-punc", action="store_true")
parser.add_argument("--reserve-word", type=str, default="")
parser.add_argument(
"--reserve-first-column",
action="store_true",
help="first column is sentence id",
)
###
parser.add_argument("--parallel-process-num", default=1, type=int)
parser.add_argument("--logdir", default="")
args = parser.parse_args()
return args
def process_sent(sent, g2p, res_wrds, args):
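    """Convert one sentence into a space-separated phoneme sequence."""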
sents = pre_process_sent(sent, args.do_filter, args.lower_case, res_wrds)
pho_seqs = [do_g2p(g2p, s, res_wrds, i == 0) for i, s in enumerate(sents)]
pho_seq = (
[FAIL_SENT]
if [FAIL_SENT] in pho_seqs
else list(itertools.chain.from_iterable(pho_seqs))
)
if args.no_punc:
pho_seq = remove_punc(pho_seq)
if args.dup_vowel > 1 or args.dup_consonant > 1:
pho_seq = dup_pho(pho_seq, args.dup_vowel, args.dup_consonant)
if args.use_word_start:
pho_seq = add_word_start(pho_seq)
return " ".join(pho_seq)
def remove_punc(sent):
ns = []
regex = re.compile("[^a-zA-Z0-9 ]")
for p in sent:
if (not regex.search(p)) or p == FAIL_SENT:
if p == " " and (len(ns) == 0 or ns[-1] == " "):
continue
ns.append(p)
return ns
def do_g2p(g2p, sent, res_wrds, is_first_sent):
if sent in res_wrds:
pho_seq = [res_wrds[sent]]
else:
pho_seq = g2p(sent)
if not is_first_sent:
pho_seq = [" "] + pho_seq # add space to separate
return pho_seq
def pre_process_sent(sent, do_filter, lower_case, res_wrds):
if do_filter:
sent = re.sub("-", " ", sent)
sent = re.sub("—", " ", sent)
if len(res_wrds) > 0:
wrds = sent.split()
wrds = ["SPLIT_ME " + w + " SPLIT_ME" if w in res_wrds else w for w in wrds]
sents = [x.strip() for x in " ".join(wrds).split("SPLIT_ME") if x.strip() != ""]
else:
sents = [sent]
if lower_case:
sents = [s.lower() if s not in res_wrds else s for s in sents]
return sents
def dup_pho(sent, dup_v_num, dup_c_num):
"""
    Duplicate phonemes, as defined in cmudict:
http://www.speech.cs.cmu.edu/cgi-bin/cmudict
"""
if dup_v_num == 1 and dup_c_num == 1:
return sent
ns = []
for p in sent:
ns.append(p)
if re.search(r"\d$", p):
for i in range(1, dup_v_num):
ns.append(f"{p}-{i}P")
elif re.search(r"\w", p):
for i in range(1, dup_c_num):
ns.append(f"{p}-{i}P")
return ns
def add_word_start(sent):
ns = []
do_add = True
ws = "▁"
for p in sent:
if do_add:
p = ws + p
do_add = False
if p == " ":
do_add = True
else:
ns.append(p)
return ns
def load_reserve_word(reserve_word):
if reserve_word == "":
        return {}
with open(reserve_word, "r") as fp:
res_wrds = [x.strip().split() for x in fp.readlines() if x.strip() != ""]
assert sum([0 if len(x) == 2 else 1 for x in res_wrds]) == 0
res_wrds = dict(res_wrds)
return res_wrds
def process_sents(sents, args):
g2p = G2p()
out_sents = []
res_wrds = load_reserve_word(args.reserve_word)
for sent in sents:
col1 = ""
if args.reserve_first_column:
col1, sent = sent.split(None, 1)
sent = process_sent(sent, g2p, res_wrds, args)
if args.reserve_first_column and col1 != "":
sent = f"{col1} {sent}"
out_sents.append(sent)
return out_sents
def main():
args = parse()
out_sents = []
with open(args.data_path, "r") as fp:
sent_list = [x.strip() for x in fp.readlines()]
    submitit = None
    if args.parallel_process_num > 1:
        try:
            import submitit
        except ImportError:
            logger.warning(
                "submitit not found; processing the data in a single job"
            )
    if args.parallel_process_num <= 1 or submitit is None:
        out_sents = process_sents(sent_list, args)
else:
# process sentences with parallel computation
lsize = len(sent_list) // args.parallel_process_num + 1
executor = submitit.AutoExecutor(folder=args.logdir)
executor.update_parameters(timeout_min=1000, cpus_per_task=4)
jobs = []
for i in range(args.parallel_process_num):
job = executor.submit(
process_sents, sent_list[lsize * i : lsize * (i + 1)], args
)
jobs.append(job)
is_running = True
while is_running:
time.sleep(5)
is_running = sum([job.done() for job in jobs]) < len(jobs)
out_sents = list(itertools.chain.from_iterable([job.result() for job in jobs]))
with open(args.out_path, "w") as fp:
fp.write("\n".join(out_sents) + "\n")
if __name__ == "__main__":
main()
| 5,840 | 29.421875 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/tasks/speech_text_joint.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from argparse import Namespace
from pathlib import Path
import torch
from fairseq.data import (
encoders,
Dictionary,
ResamplingDataset,
TransformEosLangPairDataset,
ConcatDataset,
)
from fairseq.data.iterators import GroupedEpochBatchIterator
from fairseq.data.audio.multi_modality_dataset import (
MultiModalityDataset,
LangPairMaskDataset,
ModalityDatasetItem,
)
from fairseq.data.audio.speech_to_text_dataset import (
SpeechToTextDataset,
SpeechToTextDatasetCreator,
)
from fairseq.data.audio.speech_to_text_joint_dataset import (
S2TJointDataConfig,
SpeechToTextJointDatasetCreator,
)
from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from fairseq.tasks.translation import load_langpair_dataset
logger = logging.getLogger(__name__)
LANG_TAG_TEMPLATE = "<lang:{}>"
@register_task("speech_text_joint_to_text")
class SpeechTextJointToTextTask(SpeechToTextTask):
"""
    Task for jointly training speech-to-text and text-to-text models.
"""
@classmethod
def add_args(cls, parser):
"""Add task-specific arguments to the parser."""
super(SpeechTextJointToTextTask, cls).add_args(parser)
###
parser.add_argument(
"--parallel-text-data",
default="",
help="path to parallel text data directory",
)
parser.add_argument(
"--max-tokens-text",
type=int,
metavar="N",
help="maximum tokens for encoder text input ",
)
parser.add_argument(
"--max-positions-text",
type=int,
metavar="N",
default=400,
help="maximum tokens for per encoder text input ",
)
parser.add_argument(
"--langpairs",
default=None,
metavar="S",
help='language pairs for text training, separated with ","',
)
parser.add_argument(
"--speech-sample-ratio",
default=1,
type=float,
metavar="N",
help="Multiple Ratio for speech dataset with transcripts ",
)
parser.add_argument(
"--text-sample-ratio",
default=1,
type=float,
metavar="N",
help="Multiple Ratio for text set ",
)
parser.add_argument(
"--update-mix-data",
action="store_true",
help="use mixed data in one update when update-freq > 1",
)
parser.add_argument(
"--load-speech-only", action="store_true", help="load speech data only",
)
parser.add_argument(
"--mask-text-ratio",
type=float,
metavar="V",
default=0.0,
help="mask V source tokens for text only mode",
)
parser.add_argument(
"--mask-text-type",
default="random",
choices=["random", "tail"],
help="mask text typed",
)
parser.add_argument(
"--noise-token",
default="",
help="noise token for masking src text tokens if mask-text-ratio > 0",
)
parser.add_argument(
"--infer-target-lang",
default="",
metavar="S",
help="target language for inference",
)
def __init__(self, args, src_dict, tgt_dict, infer_tgt_lang_id=None):
super().__init__(args, tgt_dict)
self.src_dict = src_dict
self.data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml)
assert self.tgt_dict.pad() == self.src_dict.pad()
assert self.tgt_dict.eos() == self.src_dict.eos()
self.speech_only = args.load_speech_only
self._infer_tgt_lang_id = infer_tgt_lang_id
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries)."""
data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml)
tgt_dict_path = Path(args.data) / data_cfg.vocab_filename
src_dict_path = Path(args.data) / data_cfg.src_vocab_filename
if (not os.path.isfile(src_dict_path)) or (not os.path.isfile(tgt_dict_path)):
raise FileNotFoundError("Dict not found: {}".format(args.data))
src_dict = Dictionary.load(src_dict_path.as_posix())
tgt_dict = Dictionary.load(tgt_dict_path.as_posix())
print("| src dictionary: {} types".format(len(src_dict)))
print("| tgt dictionary: {} types".format(len(tgt_dict)))
if args.parallel_text_data != "":
if not os.path.isabs(args.parallel_text_data):
args.parallel_text_data = os.path.join(
args.data, args.parallel_text_data
)
if args.langpairs is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
infer_tgt_lang_id = None
if args.infer_target_lang != "" and data_cfg.prepend_tgt_lang_tag_no_change:
tgt_lang_tag = SpeechToTextDataset.LANG_TAG_TEMPLATE.format(
args.infer_target_lang
)
infer_tgt_lang_id = tgt_dict.index(tgt_lang_tag)
assert infer_tgt_lang_id != tgt_dict.unk()
return cls(args, src_dict, tgt_dict, infer_tgt_lang_id=infer_tgt_lang_id)
def load_langpair_dataset(
self, prepend_tgt_lang_tag=False, sampling_alpha=1.0, epoch=0
):
lang_pairs = []
text_dataset = None
split = "train"
for lp in self.args.langpairs.split(","):
src, tgt = lp.split("-")
text_dataset = load_langpair_dataset(
self.args.parallel_text_data,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=True,
dataset_impl=None,
upsample_primary=1,
left_pad_source=False,
left_pad_target=False,
max_source_positions=self.args.max_positions_text,
max_target_positions=self.args.max_target_positions,
load_alignments=False,
truncate_source=False,
)
if prepend_tgt_lang_tag:
# TODO
text_dataset = TransformEosLangPairDataset(
text_dataset,
src_eos=self.src_dict.eos(),
tgt_bos=self.tgt_dict.eos(), # 'prev_output_tokens' starts with eos
new_tgt_bos=self.tgt_dict.index(LANG_TAG_TEMPLATE.format(tgt)),
)
lang_pairs.append(text_dataset)
if len(lang_pairs) > 1:
if sampling_alpha != 1.0:
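                # temperature-based resampling across language pairs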
size_ratios = SpeechToTextDatasetCreator.get_size_ratios(
self.args.langpairs.split(","),
[len(s) for s in lang_pairs],
alpha=sampling_alpha,
)
lang_pairs = [
ResamplingDataset(d, size_ratio=r, epoch=epoch, replace=(r >= 1.0))
for d, r in zip(lang_pairs, size_ratios)
]
return ConcatDataset(lang_pairs)
return text_dataset
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=self._infer_tgt_lang_id,
)
def build_src_tokenizer(self, args):
logger.info(f"src-pre-tokenizer: {self.data_cfg.src_pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.src_pre_tokenizer))
def build_src_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.src_bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.src_bpe_tokenizer))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
src_pre_tokenizer = self.build_src_tokenizer(self.args)
src_bpe_tokenizer = self.build_src_bpe(self.args)
ast_dataset = SpeechToTextJointDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
src_dict=None if self.speech_only else self.src_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
src_pre_tokenizer=src_pre_tokenizer,
src_bpe_tokenizer=src_bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
)
noise_token_id = -1
text_dataset = None
if self.args.parallel_text_data != "" and is_train_split:
text_dataset = self.load_langpair_dataset(
self.data_cfg.prepend_tgt_lang_tag_no_change, 1.0, epoch=epoch,
)
if self.args.mask_text_ratio > 0:
# add mask
noise_token_id = (
self.src_dict.unk()
if self.args.noise_token == ""
else self.src_dict.index(self.args.noise_token)
)
text_dataset = LangPairMaskDataset(
text_dataset,
src_bos=self.src_dict.bos(),
src_eos=self.src_dict.eos(),
noise_id=noise_token_id,
mask_ratio=self.args.mask_text_ratio,
mask_type=self.args.mask_text_type,
)
if text_dataset is not None:
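            # bundle the speech and text datasets with per-modality length and batch limits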
mdsets = [
ModalityDatasetItem(
"sup_speech",
ast_dataset,
(self.args.max_source_positions, self.args.max_target_positions),
self.args.max_tokens,
self.args.batch_size,
),
ModalityDatasetItem(
"text",
text_dataset,
(self.args.max_positions_text, self.args.max_target_positions),
self.args.max_tokens_text
if self.args.max_tokens_text is not None
else self.args.max_tokens,
self.args.batch_size,
),
]
ast_dataset = MultiModalityDataset(mdsets)
self.datasets[split] = ast_dataset
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.tgt_dict
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
return None if self.speech_only else self.src_dict
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=0,
data_buffer_size=0,
disable_iterator_cache=False,
skip_remainder_batch=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
):
if not isinstance(dataset, MultiModalityDataset):
return super(SpeechTextJointToTextTask, self).get_batch_iterator(
dataset,
max_tokens,
max_sentences,
max_positions,
ignore_invalid_inputs,
required_batch_size_multiple,
seed,
num_shards,
shard_id,
num_workers,
epoch,
data_buffer_size,
disable_iterator_cache,
skip_remainder_batch=skip_remainder_batch,
update_epoch_batch_itr=update_epoch_batch_itr,
)
mult_ratio = [self.args.speech_sample_ratio, self.args.text_sample_ratio]
assert len(dataset.datasets) == 2
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
batch_samplers = dataset.get_batch_samplers(
mult_ratio, required_batch_size_multiple, seed
)
# return a reusable, sharded iterator
epoch_iter = GroupedEpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_samplers=batch_samplers,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
mult_rate=1 if self.args.update_mix_data else max(self.args.update_freq),
buffer_size=data_buffer_size,
skip_remainder_batch=skip_remainder_batch,
)
self.dataset_to_epoch_iter[dataset] = {} # refresh it every epoch
return epoch_iter
| 13,654 | 35.124339 | 88 |
py
|
sign-topic
|
sign-topic-main/examples/speech_text_joint_to_text/tasks/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
| 206 | 22 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/constrained_decoding/tok.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import sacremoses
def main(args):
"""Tokenizes, preserving tabs"""
mt = sacremoses.MosesTokenizer(lang=args.lang)
def tok(s):
return mt.tokenize(s, return_str=True)
for line in sys.stdin:
parts = list(map(tok, line.split("\t")))
print(*parts, sep="\t", flush=True)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--lang", "-l", default="en")
parser.add_argument("--penn", "-p", action="store_true")
parser.add_argument("--fields", "-f", help="fields to tokenize")
args = parser.parse_args()
main(args)
| 844 | 23.142857 | 68 |
py
|
sign-topic
|
sign-topic-main/examples/constrained_decoding/normalize.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from sacremoses.normalize import MosesPunctNormalizer
def main(args):
normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn)
for line in sys.stdin:
print(normalizer.normalize(line.rstrip()), flush=True)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--lang", "-l", default="en")
parser.add_argument("--penn", "-p", action="store_true")
args = parser.parse_args()
main(args)
| 698 | 23.964286 | 69 |
py
|
sign-topic
|
sign-topic-main/examples/rxf/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import rxf_src # noqa
| 208 | 28.857143 | 65 |
py
|
sign-topic
|
sign-topic-main/examples/rxf/rxf_src/label_smoothed_cross_entropy_r3f.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
@register_criterion("label_smoothed_cross_entropy_r3f")
class LabelSmoothedCrossEntropyR3FCriterion(FairseqCriterion):
def __init__(
self, task, sentence_avg, label_smoothing, eps, r3f_lambda, noise_type
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.label_smoothing = label_smoothing
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='normal',
choices=['normal', 'uniform'],
help='type of noises')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
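        # symmetric KL between the clean and noised output distributions:
        # KL(P_clean || P_noised) + KL(P_noised || P_clean), summed over the
        # vocabulary and normalized by the first dimension of the logits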
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
token_embeddings = model.encoder.embed_tokens(sample["net_input"]["src_tokens"])
input_logits, extra = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(
model, (input_logits, extra), sample, reduce=reduce
)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
if model.training:
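            # R3F: sample noise, perturb the token embeddings and run a second forward pass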
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.clone() + noise
noised_logits, _ = model(
**sample["net_input"], token_embeddings=noised_embeddings
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
            symm_kl = symm_kl * sample_size
            loss = loss + self.r3f_lambda * symm_kl
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if model.training:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.label_smoothing,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
metrics.log_scalar("symm_kl", symm_kl_sum / sample_size, sample_size, round=3)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 6,109 | 37.670886 | 91 |
py
|
sign-topic
|
sign-topic-main/examples/rxf/rxf_src/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f # noqa
| 258 | 36 | 79 |
py
|
sign-topic
|
sign-topic-main/examples/rxf/rxf_src/sentence_prediction_r3f.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_prediction_r3f")
class SentencePredictionR3F(FairseqCriterion):
def __init__(
self,
task,
eps,
r3f_lambda,
noise_type,
classification_head_name,
regression_target,
):
super().__init__(task)
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
self.classification_head_name = classification_head_name
self.regression_target = regression_target
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='uniform',
choices=['normal', 'uniform'],
help='type of noises for RXF methods')
parser.add_argument('--classification-head-name',
default='sentence_classification_head',
help='name of the classification head to use')
parser.add_argument('--regression-target', action='store_true')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
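        # symmetric KL between the clean and noised output distributions:
        # KL(P_clean || P_noised) + KL(P_noised || P_clean), summed over the
        # classes and normalized by the first dimension of the logits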
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=sentence_prediction"
token_embeddings = model.encoder.sentence_encoder.embed_tokens(
sample["net_input"]["src_tokens"]
)
input_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=token_embeddings,
)
if model.training and self.noise_sampler:
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.detach().clone() + noise
noised_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=noised_embeddings,
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
else:
symm_kl = 0
targets = model.get_targets(sample, [input_logits]).view(-1)
sample_size = targets.numel()
if not self.regression_target:
loss = F.nll_loss(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
targets,
reduction="sum",
)
if model.training:
symm_kl = symm_kl * sample_size
loss = loss + self.r3f_lambda * symm_kl
else:
logits = input_logits.squeeze().float()
targets = targets.float()
loss = F.mse_loss(logits, targets, reduction="sum")
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = input_logits.max(dim=1)[1]
logging_output.update(ncorrect=(preds == targets).sum().item())
if model.training and self.noise_sampler:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"symm_kl": symm_kl_sum / sample_size,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
agg_output.update(accuracy=ncorrect / nsentences)
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
return agg_output
| 6,587 | 37.302326 | 96 |
py
|
sign-topic
|
sign-topic-main/examples/megatron_11b/detok.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import fileinput
import sacremoses
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument("files", nargs="*", help="input files")
args = parser.parse_args()
detok = sacremoses.MosesDetokenizer()
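    # detokenize with Moses, then strip the extra spaces around "@", "=" and "–" artifacts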
for line in fileinput.input(args.files, openhook=fileinput.hook_compressed):
print(
detok.detokenize(line.strip().split(" "))
.replace(" @", "")
.replace("@ ", "")
.replace(" =", "=")
.replace("= ", "=")
.replace(" – ", "–")
)
if __name__ == "__main__":
main()
| 821 | 23.909091 | 80 |
py
|
sign-topic
|
sign-topic-main/scripts/count_docs.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""
import argparse
import gzip
import sys
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("--gzip", action="store_true")
args = parser.parse_args()
def gopen():
if args.gzip:
return gzip.open(args.input, "r")
else:
return open(args.input, "r", encoding="utf-8")
num_lines = []
num_toks = []
with gopen() as h:
num_docs = 1
num_lines_in_doc = 0
num_toks_in_doc = 0
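        # NOTE: a final document is only tallied if the file ends with an empty line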
for i, line in enumerate(h):
if len(line.strip()) == 0: # empty line indicates new document
num_docs += 1
num_lines.append(num_lines_in_doc)
num_toks.append(num_toks_in_doc)
num_lines_in_doc = 0
num_toks_in_doc = 0
else:
num_lines_in_doc += 1
num_toks_in_doc += len(line.rstrip().split())
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
print("found {} docs".format(num_docs))
print("average num lines per doc: {}".format(np.mean(num_lines)))
print("average num toks per doc: {}".format(np.mean(num_toks)))
if __name__ == "__main__":
main()
| 1,784 | 29.254237 | 79 |
py
|
sign-topic
|
sign-topic-main/scripts/read_binarized.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from fairseq.data import Dictionary, data_utils, indexed_dataset
def get_parser():
parser = argparse.ArgumentParser(
description="writes text from binarized file to stdout"
)
# fmt: off
parser.add_argument('--dataset-impl', help='dataset implementation',
choices=indexed_dataset.get_available_dataset_impl())
parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None)
parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
dictionary = Dictionary.load(args.dict) if args.dict is not None else None
dataset = data_utils.load_indexed_dataset(
args.input,
dictionary,
dataset_impl=args.dataset_impl,
default="lazy",
)
for tensor_line in dataset:
if dictionary is None:
line = " ".join([str(int(x)) for x in tensor_line])
else:
line = dictionary.string(tensor_line)
print(line)
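# Example usage (paths are illustrative):
#   python scripts/read_binarized.py --input data-bin/train.en-de.en \
#       --dict data-bin/dict.en.txt --dataset-impl mmap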
if __name__ == "__main__":
main()
| 1,370 | 26.979592 | 103 |
py
|
sign-topic
|
sign-topic-main/scripts/compare_namespaces.py
|
#!/usr/bin/env python
"""Helper script to compare two argparse.Namespace objects."""
from argparse import Namespace # noqa
def main():
ns1 = eval(input("Namespace 1: "))
ns2 = eval(input("Namespace 2: "))
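    # NOTE: eval() on pasted reprs is only safe with trusted input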
def keys(ns):
ks = set()
for k in dir(ns):
if not k.startswith("_"):
ks.add(k)
return ks
k1 = keys(ns1)
k2 = keys(ns2)
def print_keys(ks, ns1, ns2=None):
for k in ks:
if ns2 is None:
print("{}\t{}".format(k, getattr(ns1, k, None)))
else:
print(
"{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None))
)
print("Keys unique to namespace 1:")
print_keys(k1 - k2, ns1)
print()
print("Keys unique to namespace 2:")
print_keys(k2 - k1, ns2)
print()
print("Overlapping keys with different values:")
ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")]
print_keys(ks, ns1, ns2)
print()
if __name__ == "__main__":
main()
| 1,090 | 22.212766 | 88 |
py
|
sign-topic
|
sign-topic-main/scripts/split_train_valid_docs.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into a train and valid set while respecting document
boundaries. Documents should be separated by a single empty line.
"""
import argparse
import random
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("sample_output", help="train output file")
parser.add_argument("remainder_output", help="valid output file")
parser.add_argument("-k", type=int, help="remainder size")
parser.add_argument(
"--lines", action="store_true", help="split lines instead of docs"
)
args = parser.parse_args()
assert args.k is not None
sample = []
remainder = []
num_docs = [0]
def update_sample(doc):
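        # reservoir sampling: keep a uniform random sample of k docs in `sample`;
        # evicted or rejected docs go to `remainder`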
if len(sample) < args.k:
sample.append(doc.copy())
else:
i = num_docs[0]
j = random.randrange(i + 1)
if j < args.k:
remainder.append(sample[j])
sample[j] = doc.copy()
else:
remainder.append(doc.copy())
num_docs[0] += 1
doc.clear()
with open(args.input, "r", encoding="utf-8") as h:
doc = []
for i, line in enumerate(h):
if line.strip() == "": # empty line indicates new document
update_sample(doc)
else:
doc.append(line)
if args.lines:
update_sample(doc)
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
if len(doc) > 0:
update_sample(doc)
print(file=sys.stderr, flush=True)
assert len(sample) == args.k
with open(args.sample_output, "w", encoding="utf-8") as out:
first = True
for doc in sample:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
with open(args.remainder_output, "w", encoding="utf-8") as out:
first = True
for doc in remainder:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
if __name__ == "__main__":
main()
| 2,551 | 28.333333 | 74 |
py
|
sign-topic
|
sign-topic-main/scripts/average_checkpoints.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import os
import re
import torch
from fairseq.file_io import PathManager
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for fpath in inputs:
with PathManager.open(fpath, "rb") as f:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, "cpu")
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state["model"]
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
"For checkpoint {}, expected list of params: {}, "
"but found: {}".format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
if averaged_params[k].is_floating_point():
averaged_params[k].div_(num_models)
else:
averaged_params[k] //= num_models
new_state["model"] = averaged_params
return new_state
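# Example usage (checkpoint paths are illustrative):
#   new_state = average_checkpoints(["checkpoint91.pt", "checkpoint92.pt"])
#   torch.save(new_state, "checkpoint_avg.pt")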
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt")
else:
pt_regexp = re.compile(r"checkpoint(\d+)\.pt")
files = PathManager.ls(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
raise Exception(
"Found {} checkpoint files but need at least {}", len(entries), n
)
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(
description="Tool to average the params of input checkpoints to "
"produce a new checkpoint",
)
# fmt: off
parser.add_argument('--inputs', required=True, nargs='+',
help='Input checkpoint file paths.')
parser.add_argument('--output', required=True, metavar='FILE',
help='Write the new checkpoint containing the averaged weights to this path.')
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument('--num-epoch-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_xx.pt in the '
'path specified by input, and average last this many of them.')
num_group.add_argument('--num-update-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by'
' input, and average last this many of them.')
parser.add_argument('--checkpoint-upper-bound', type=int,
help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, '
'when using --num-update-checkpoints, this will set an upper bound on which update to use'
'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be'
' averaged.'
'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would'
' be averaged assuming --save-interval-updates 500'
)
# fmt: on
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
assert args.checkpoint_upper_bound is None or (
args.num_epoch_checkpoints is not None
or args.num_update_checkpoints is not None
), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints"
assert (
args.num_epoch_checkpoints is None or args.num_update_checkpoints is None
), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints"
if num is not None:
args.inputs = last_n_checkpoints(
args.inputs,
num,
is_update_based,
upper_bound=args.checkpoint_upper_bound,
)
print("averaging checkpoints: ", args.inputs)
new_state = average_checkpoints(args.inputs)
with PathManager.open(args.output, "wb") as f:
torch.save(new_state, f)
print("Finished writing averaged checkpoint to {}".format(args.output))
if __name__ == "__main__":
main()
| 6,075 | 36.73913 | 126 |
py
|
sign-topic
|
sign-topic-main/scripts/build_sym_alignment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
github: http://github.com/clab/fast_align
instructions: follow the instructions in README.md
mosesdecoder:
github: http://github.com/moses-smt/mosesdecoder
instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
text.joined - concatenation of lines from the source_file and the
target_file.
align.forward - forward pass of fast_align.
align.backward - backward pass of fast_align.
aligned.sym_heuristic - symmetrized alignment.
"""
import argparse
import os
from itertools import zip_longest
def main():
parser = argparse.ArgumentParser(description="symmetric alignment builer")
# fmt: off
parser.add_argument('--fast_align_dir',
help='path to fast_align build directory')
parser.add_argument('--mosesdecoder_dir',
help='path to mosesdecoder root directory')
parser.add_argument('--sym_heuristic',
help='heuristic to use for symmetrization',
default='grow-diag-final-and')
parser.add_argument('--source_file',
help='path to a file with sentences '
'in the source language')
parser.add_argument('--target_file',
help='path to a file with sentences '
'in the target language')
parser.add_argument('--output_dir',
help='output directory')
# fmt: on
args = parser.parse_args()
fast_align_bin = os.path.join(args.fast_align_dir, "fast_align")
symal_bin = os.path.join(args.mosesdecoder_dir, "bin", "symal")
sym_fast_align_bin = os.path.join(
args.mosesdecoder_dir, "scripts", "ems", "support", "symmetrize-fast-align.perl"
)
# create joined file
joined_file = os.path.join(args.output_dir, "text.joined")
with open(args.source_file, "r", encoding="utf-8") as src, open(
args.target_file, "r", encoding="utf-8"
) as tgt:
with open(joined_file, "w", encoding="utf-8") as joined:
for s, t in zip_longest(src, tgt):
print("{} ||| {}".format(s.strip(), t.strip()), file=joined)
# run forward alignment
fwd_align_file = os.path.join(args.output_dir, "align.forward")
fwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v > {FWD}".format(
FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file
)
assert os.system(fwd_fast_align_cmd) == 0
# run backward alignment
bwd_align_file = os.path.join(args.output_dir, "align.backward")
bwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}".format(
FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file
)
assert os.system(bwd_fast_align_cmd) == 0
# run symmetrization
sym_out_file = os.path.join(args.output_dir, "aligned")
sym_cmd = "{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}".format(
SYMFASTALIGN=sym_fast_align_bin,
FWD=fwd_align_file,
BWD=bwd_align_file,
SRC=args.source_file,
TGT=args.target_file,
OUT=sym_out_file,
HEURISTIC=args.sym_heuristic,
SYMAL=symal_bin,
)
assert os.system(sym_cmd) == 0
if __name__ == "__main__":
main()
| 3,796 | 37.744898 | 88 |
py
|
sign-topic
|
sign-topic-main/scripts/spm_decode.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="sentencepiece model to use for decoding"
)
parser.add_argument("--input", required=True, help="input file to decode")
parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.input_format == "piece":
def decode(input):
return "".join(sp.DecodePieces(input))
elif args.input_format == "id":
def decode(input):
return "".join(sp.DecodeIds(input))
else:
raise NotImplementedError
def tok2int(tok):
# remap reference-side <unk> (represented as <<unk>>) to 0
return int(tok) if tok != "<<unk>>" else 0
with open(args.input, "r", encoding="utf-8") as h:
for line in h:
if args.input_format == "id":
print(decode(list(map(tok2int, line.rstrip().split()))))
elif args.input_format == "piece":
print(decode(line.rstrip().split()))
if __name__ == "__main__":
main()
| 1,520 | 27.166667 | 83 |
py
|
sign-topic
|
sign-topic-main/scripts/rm_pt.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import re
import shutil
import sys
pt_regexp = re.compile(r"checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt")
pt_regexp_epoch_based = re.compile(r"checkpoint(\d+)\.pt")
pt_regexp_update_based = re.compile(r"checkpoint_\d+_(\d+)\.pt")
def parse_checkpoints(files):
entries = []
for f in files:
m = pt_regexp_epoch_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
else:
m = pt_regexp_update_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
return entries
def last_n_checkpoints(files, n):
entries = parse_checkpoints(files)
return [x[1] for x in sorted(entries, reverse=True)[:n]]
def every_n_checkpoints(files, n):
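    # keep every n-th checkpoint, counting back from the most recent one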
entries = parse_checkpoints(files)
return [x[1] for x in sorted(sorted(entries)[::-n])]
def main():
parser = argparse.ArgumentParser(
description=(
"Recursively delete checkpoint files from `root_dir`, "
"but preserve checkpoint_best.pt and checkpoint_last.pt"
)
)
parser.add_argument("root_dirs", nargs="*")
parser.add_argument(
"--save-last", type=int, default=0, help="number of last checkpoints to save"
)
parser.add_argument(
"--save-every", type=int, default=0, help="interval of checkpoints to save"
)
parser.add_argument(
"--preserve-test",
action="store_true",
help="preserve checkpoints in dirs that start with test_ prefix (default: delete them)",
)
parser.add_argument(
"--delete-best", action="store_true", help="delete checkpoint_best.pt"
)
parser.add_argument(
"--delete-last", action="store_true", help="delete checkpoint_last.pt"
)
parser.add_argument(
"--no-dereference", action="store_true", help="don't dereference symlinks"
)
args = parser.parse_args()
files_to_desymlink = []
files_to_preserve = []
files_to_delete = []
for root_dir in args.root_dirs:
for root, _subdirs, files in os.walk(root_dir):
if args.save_last > 0:
to_save = last_n_checkpoints(files, args.save_last)
else:
to_save = []
if args.save_every > 0:
to_save += every_n_checkpoints(files, args.save_every)
for file in files:
if not pt_regexp.fullmatch(file):
continue
full_path = os.path.join(root, file)
if (
not os.path.basename(root).startswith("test_") or args.preserve_test
) and (
(file == "checkpoint_last.pt" and not args.delete_last)
or (file == "checkpoint_best.pt" and not args.delete_best)
or file in to_save
):
if os.path.islink(full_path) and not args.no_dereference:
files_to_desymlink.append(full_path)
else:
files_to_preserve.append(full_path)
else:
files_to_delete.append(full_path)
if len(files_to_desymlink) == 0 and len(files_to_delete) == 0:
print("Nothing to do.")
sys.exit(0)
files_to_desymlink = sorted(files_to_desymlink)
files_to_preserve = sorted(files_to_preserve)
files_to_delete = sorted(files_to_delete)
print("Operations to perform (in order):")
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
print(" - preserve (and dereference symlink): " + file)
if len(files_to_preserve) > 0:
for file in files_to_preserve:
print(" - preserve: " + file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print(" - delete: " + file)
while True:
resp = input("Continue? (Y/N): ")
if resp.strip().lower() == "y":
break
elif resp.strip().lower() == "n":
sys.exit(0)
print("Executing...")
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
realpath = os.path.realpath(file)
print("rm " + file)
os.remove(file)
print("cp {} {}".format(realpath, file))
shutil.copyfile(realpath, file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print("rm " + file)
os.remove(file)
if __name__ == "__main__":
main()
| 4,740 | 32.387324 | 96 |
py
|
sign-topic
|
sign-topic-main/scripts/__init__.py
| 0 | 0 | 0 |
py
|
|
sign-topic
|
sign-topic-main/scripts/spm_train.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import sentencepiece as spm
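# Thin pass-through wrapper: every command-line argument is forwarded
# verbatim to the SentencePiece trainer. Hypothetical usage (flag values
# are illustrative, not taken from this repository):
#   python spm_train.py --input=corpus.txt --model_prefix=spm \
#       --vocab_size=8000 --model_type=unigram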
if __name__ == "__main__":
spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
| 431 | 24.411765 | 82 |
py
|
sign-topic
|
sign-topic-main/scripts/shard_docs.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into shards while respecting document boundaries. Documents
should be separated by a single empty line.
"""
import argparse
import contextlib


def main():
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("--num-shards", type=int)
args = parser.parse_args()
assert args.num_shards is not None and args.num_shards > 1
with open(args.input, "r", encoding="utf-8") as h:
with contextlib.ExitStack() as stack:
outputs = [
stack.enter_context(
open(args.input + ".shard" + str(i), "w", encoding="utf-8")
)
for i in range(args.num_shards)
]
doc = []
first_doc = [True] * args.num_shards
def output_doc(i):
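                # Flush the buffered document to shard i. Documents within a
                # shard are separated by a single blank line, with no leading
                # blank line before the first document.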
if not first_doc[i]:
outputs[i].write("\n")
first_doc[i] = False
for line in doc:
outputs[i].write(line)
                doc.clear()

            num_docs = 0
for line in h:
if line.strip() == "": # empty line indicates new document
output_doc(num_docs % args.num_shards)
num_docs += 1
else:
doc.append(line)
            output_doc(num_docs % args.num_shards)


if __name__ == "__main__":
main()
| 1,616 | 28.4 | 79 |
py
|
sign-topic
|
sign-topic-main/scripts/spm_encode.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import contextlib
import sys

import sentencepiece as spm


def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="sentencepiece model to use for encoding"
)
parser.add_argument(
"--inputs", nargs="+", default=["-"], help="input files to filter/encode"
)
parser.add_argument(
"--outputs", nargs="+", default=["-"], help="path to save encoded outputs"
)
parser.add_argument("--output_format", choices=["piece", "id"], default="piece")
parser.add_argument(
"--min-len",
type=int,
metavar="N",
help="filter sentence pairs with fewer than N tokens",
)
parser.add_argument(
"--max-len",
type=int,
metavar="N",
help="filter sentence pairs with more than N tokens",
)
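    # Hypothetical invocation (paths are illustrative):
    #   python spm_encode.py --model spm.model --output_format piece \
    #       --inputs train.de train.en --outputs train.bpe.de train.bpe.en \
    #       --min-len 1 --max-len 250
    # A parallel line set is skipped if any of its sides is empty or, after
    # encoding, falls outside the length bounds.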
args = parser.parse_args()
assert len(args.inputs) == len(
args.outputs
), "number of input and output paths should match"
sp = spm.SentencePieceProcessor()
    sp.Load(args.model)

    if args.output_format == "piece":

        def encode(input):
            return sp.EncodeAsPieces(input)

    elif args.output_format == "id":

        def encode(input):
            return list(map(str, sp.EncodeAsIds(input)))

    else:
        raise NotImplementedError

    if args.min_len is not None or args.max_len is not None:

        def valid(line):
            return (args.min_len is None or len(line) >= args.min_len) and (
                args.max_len is None or len(line) <= args.max_len
            )

    else:

        def valid(line):
            return True
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-"
else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-"
else sys.stdout
for output in args.outputs
]
stats = {
"num_empty": 0,
"num_filtered": 0,
        }

        def encode_line(line):
line = line.strip()
if len(line) > 0:
line = encode(line)
if valid(line):
return line
else:
stats["num_filtered"] += 1
else:
stats["num_empty"] += 1
            return None

        for i, lines in enumerate(zip(*inputs), start=1):
enc_lines = list(map(encode_line, lines))
if not any(enc_line is None for enc_line in enc_lines):
for enc_line, output_h in zip(enc_lines, outputs):
print(" ".join(enc_line), file=output_h)
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr)
print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr)
if __name__ == "__main__":
main()
| 3,432 | 27.608333 | 84 |
py
|