wav2letter | wav2letter-main/recipes/lexicon_free/utilities/compute_lower_ppl_kenlm.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Compute upper and lower limits on word perplexity for kenlm ngram models
Command : python3 compute_lower_ppl_kenlm.py --vocab_file [...] --text [...] \
--char_model [...] --word_model [...]
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import kenlm
import numpy
from utils import EOS, prepare_vocabs, transform_asg, transform_asg_back
LOG10 = numpy.log(10)
def compute_word_logprob(model, current_state, target_word):
word_pred = 0
if target_word == EOS:
stateOut = kenlm.State()
word_pred += model.BaseScore(current_state, str(target_word), stateOut) * LOG10
else:
stateIn = current_state
for token in list(target_word):
stateOut = kenlm.State()
word_pred += model.BaseScore(stateIn, token, stateOut) * LOG10
stateIn = stateOut
return word_pred
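# Usage sketch (hypothetical paths; assumes a char-level KenLM trained on
# "|"-delimited ASG tokens):
#   model = kenlm.LanguageModel("char_lm.bin")
#   state = kenlm.State()
#   model.BeginSentenceWrite(state)
#   # natural-log probability of the word "cat|", scored character by character
#   print(compute_word_logprob(model, state, "cat|"))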
def compute_denominator(model, current_state, words):
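    # log-sum-exp over the candidate words' log-probabilities, stabilized by
    # factoring out the max: log(sum_i exp(p_i)) = m + log(sum_i exp(p_i - m))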
preds = [compute_word_logprob(model, current_state, word) for word in words]
max_pred = numpy.max(preds)
return max_pred + numpy.log(numpy.sum(numpy.exp(preds - max_pred)))
def compute_words_model_pdf_mass(word_model, known_words, stateIn):
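    # rank known words by word-LM probability and keep the head of the
    # distribution: `top` is the first rank at which cumulative mass exceeds
    # 0.95; the words before it are returned ASG-encoded and "|"-terminated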
probs = []
for word in known_words:
stateOut = kenlm.State()
probs.append(
numpy.power(10, word_model.BaseScore(stateIn, str(word), stateOut))
)
probs_arr = numpy.array(probs)
indices = numpy.argsort(-probs_arr)
top = numpy.where(numpy.cumsum(probs_arr[indices]) > 0.95)[0][0]
return [
transform_asg(w) + "|" if w != EOS else w for w in known_words[indices[:top]]
]
def compute_ppl_lower_limit(
model, word_model, sentences, known_words, known_words_original
):
n_words = 0
unk_n_words = 0
ppl = 0.0
ppl_lower = 0.0
n_logging = len(sentences)
for n, sentence in enumerate(sentences):
stateIn = kenlm.State()
word_stateIn = kenlm.State()
model.BeginSentenceWrite(stateIn)
word_model.BeginSentenceWrite(word_stateIn)
current_word = ""
word_score = 0.0
word_state = stateIn # state for char LM ending with exactly the previous word
for token in sentence.split(" "):
stateOut = kenlm.State()
word_score += model.BaseScore(stateIn, token, stateOut) * LOG10
stateIn = stateOut
current_word += token
if token == "|":
if current_word in known_words:
n_words += 1
ppl += word_score
pdf_mass_words = set(
compute_words_model_pdf_mass(
word_model, known_words_original, word_stateIn
)
)
if current_word not in pdf_mass_words:
pdf_mass_words.add(current_word)
ppl_lower += compute_word_logprob(
model, word_state, current_word
) - compute_denominator(model, word_state, pdf_mass_words)
else:
unk_n_words += 1
word_stateOut = kenlm.State()
word_model.BaseScore(
word_stateIn, transform_asg_back(current_word), word_stateOut
)
word_stateIn = word_stateOut
current_word = ""
word_score = 0.0
word_state = stateOut
stateOut = kenlm.State()
n_words += 1
ppl += model.BaseScore(stateIn, EOS, stateOut) * LOG10
pdf_mass_words = set(
compute_words_model_pdf_mass(word_model, known_words_original, word_stateIn)
)
if EOS not in pdf_mass_words:
pdf_mass_words.add(EOS)
ppl_lower += compute_word_logprob(model, word_state, EOS) - compute_denominator(
model, word_state, pdf_mass_words
)
if n % 10 == 0:
print(
"Evaluated",
n,
"sentences among",
n_logging,
"upper limit perplexity",
numpy.exp(-ppl / n_words),
"lower limit perplexity",
numpy.exp(-ppl_lower / n_words),
"number of words",
n_words,
flush=True,
)
print("Final loss", ppl, "loss lower", ppl_lower)
print("Upper limit on perplexity:", numpy.exp(-ppl / n_words))
print("Lower limit on perplexity:", numpy.exp(-ppl_lower / n_words))
print("Total number of words:", n_words, "unknown words:", unk_n_words)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Upper and lower limit on word perplexity for kenlm char model"
)
parser.add_argument(
"--vocab_file",
help="vocabulary of known words, use file "
"from --limit_vocab_file during word kenLM training.",
)
parser.add_argument(
"--text", help="file to evaluate, prepared for char lm training"
)
parser.add_argument("--char_model", help="kenlm char model")
parser.add_argument("--word_model", help="kenlm word model")
args = parser.parse_args()
print("Evaluate file {}".format(args.text))
known_words, known_words_original = prepare_vocabs(args.vocab_file)
with open(args.text, "r") as f:
sentences = [line.strip() for line in f]
word_model = kenlm.LanguageModel(args.word_model)
char_model = kenlm.LanguageModel(args.char_model)
compute_ppl_lower_limit(
char_model, word_model, sentences, known_words, known_words_original
)

wav2letter | wav2letter-main/recipes/lexicon_free/utilities/compute_upper_ppl_convlm.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Compute upper limit on word perplexity for convlm models
Command (for word) : python3 compute_upper_ppl_convlm.py --model [...] \
--dict [...] --text [...] --model_type word --dataset_type ls
Command (for char) : python3 compute_upper_ppl_convlm.py --model [...] \
--dict [...] --word_dict [...] --text [...] \
--model_type char14B --dataset_type ls
Command (for char) : python3 compute_upper_ppl_convlm.py --model [...] \
--dict [...] --word_dict [...] --text [...] \
--model_type char20B --dataset_type ls
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import numpy
import torch
from convlm_utils import (
EOSIDX,
UNKIDX,
build_token_index_correspondence,
decodeInputText,
load_char_model_14B,
load_char_model_20B,
load_word_model,
)
from fairseq.data import Dictionary
from utils import transform_asg
def compute_ppl_upper_limit_char_convlm(
model,
input_charlm,
charLM_indices_token_dict,
charLM_token_indices_dict,
known_words,
):
sum_logp = 0
n_words = 0
sum_logp_known = 0
n_known_words = 0
sum_logp_unknown = 0
n_unknown_words = 0
n_letters = 0
for sentence in input_charlm:
x = torch.LongTensor([EOSIDX] + sentence).reshape(1, len(sentence) + 1).cuda()
with torch.no_grad():
y = model.forward(x)[0]
logprobs = torch.nn.functional.log_softmax(y, 2).detach().cpu().numpy()[0]
current_word = ""
word_ppl = 0.0
for index, token_id in enumerate(sentence):
n_letters += 1
current_word += charLM_indices_token_dict[token_id]
word_ppl += logprobs[index, token_id]
if charLM_indices_token_dict[token_id] == "|":
if current_word in known_words:
sum_logp_known += word_ppl
n_known_words += 1
else:
sum_logp_unknown += word_ppl
n_unknown_words += 1
current_word = ""
word_ppl = 0
sum_logp += numpy.sum(logprobs[numpy.arange(len(sentence)), sentence])
n_words += numpy.sum(numpy.array(sentence) == charLM_token_indices_dict["|"])
# add eos
sum_logp += logprobs[-1, EOSIDX]
n_words += 1
sum_logp_known += logprobs[-1, EOSIDX]
n_known_words += 1
n_letters += 1
loss_letter = -(sum_logp + sum_logp_unknown) / n_letters
ppl_word_no_unk = numpy.exp(-sum_logp_known / n_known_words)
ppl_word_unk = numpy.exp(-sum_logp_unknown / n_unknown_words)
assert n_known_words + n_unknown_words == n_words, "Error in words counting"
assert numpy.allclose(sum_logp, sum_logp_known + sum_logp_unknown), "Error in loss"
ppl_word = numpy.exp(-sum_logp / n_words)
print(
"Letter loss: {}, letter perplexity: {}".format(
loss_letter, numpy.exp(loss_letter)
)
)
print("Upper word perplexity for all words: {}".format(ppl_word))
print("Upper word perplexity for unknown words: {}".format(ppl_word_unk))
print(
"(Reported in the paper) "
"Upper word perplexity for known words: {}".format(ppl_word_no_unk)
)
def compute_ppl_upper_limit_word_convlm(model, input_wordlm):
sum_logp_known = 0
n_known_words = 0
sum_logp_unknown = 0
n_unknown_words = 0
for sentence in input_wordlm:
x = torch.LongTensor([EOSIDX] + sentence).reshape(1, len(sentence) + 1).cuda()
with torch.no_grad():
y = model.forward(x)[0]
logprobs = (
model.adaptive_softmax.get_log_prob(y, None).detach().cpu().numpy()[0]
)
for index, token_id in enumerate(sentence):
if token_id != UNKIDX:
sum_logp_known += logprobs[index, token_id]
n_known_words += 1
else:
sum_logp_unknown += logprobs[index, token_id]
n_unknown_words += 1
# add eos
sum_logp_known += logprobs[-1, EOSIDX]
n_known_words += 1
ppl_word_no_unk = numpy.exp(-sum_logp_known / n_known_words)
ppl_word_unk = numpy.exp(-sum_logp_unknown / n_unknown_words)
ppl_word = numpy.exp(
-(sum_logp_known + sum_logp_unknown) / (n_known_words + n_unknown_words)
)
print("Word perplexity for all words: {}".format(ppl_word))
print("Word perplexity for unknown words: {}".format(ppl_word_unk))
print(
"(Reported in the paper) "
"Word perplexity for known words: {}".format(ppl_word_no_unk)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Upper limit on word perplexity for convlm models"
)
parser.add_argument("--model", help="path to convlm model")
parser.add_argument("--dict", help="path to convlm dict file in data")
parser.add_argument(
"--text", help="file to evaluate, in necessary format for model"
)
parser.add_argument("--model_type", help='"word" or "char14B" or "char20B"')
parser.add_argument("--dataset_type", help='"ls" or "wsj"', default="ls")
parser.add_argument(
"--word_dict",
help="path to convlm word convlm dict file"
"in data (ignored for word model eval)",
default=None,
)
args = parser.parse_args()
print("Evaluate file {}".format(args.text))
token_indices_dict, indices_token_dict = build_token_index_correspondence(args.dict)
with open(args.text, "r") as f:
sentences = [line.strip() for line in f]
input_data = decodeInputText(sentences, token_indices_dict)
fairseq_dict = Dictionary.load(args.dict)
if args.model_type == "word":
model = load_word_model(args.model, fairseq_dict, args.dataset_type)
compute_ppl_upper_limit_word_convlm(model, input_data)
else:
with open(args.word_dict, "r") as f:
known_words = set(
[transform_asg(line.strip().split(" ")[0]) + "|" for line in f]
)
if "14B" in args.model_type:
model = load_char_model_14B(args.model, fairseq_dict, args.dataset_type)
else:
model = load_char_model_20B(args.model, fairseq_dict, args.dataset_type)
compute_ppl_upper_limit_char_convlm(
model, input_data, indices_token_dict, token_indices_dict, known_words
)

wav2letter | wav2letter-main/recipes/lexicon_free/utilities/utils.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import numpy
EOS = "</s>"
def convert_words_to_letters_asg_rep2(fin_name, fout_name):
with open(fin_name, "r") as fin, open(fout_name, "w") as fout:
for line in fin:
words = line.strip().split(" ")
for word in words:
word = re.sub("[^a-z'.]+", "", word)
if len(word) == 0:
continue
new_word = transform_asg(word) + "|"
fout.write(" ".join(list(new_word)) + " ")
fout.write("\n")
def transform_asg(word):
if word == "":
return ""
new_word = word[0]
prev = word[0]
repetition = 0
for letter in word[1:]:
if letter == prev:
repetition += 1
else:
if repetition != 0:
new_word += "1" if repetition == 1 else "2"
repetition = 0
new_word += letter
prev = letter
if repetition != 0:
new_word += "1" if repetition == 1 else "2"
return new_word
def transform_asg_back(word):
new_word = ""
for letter in word:
if letter == "|":
continue
if letter == "1":
new_word += new_word[-1]
elif letter == "2":
new_word += new_word[-1] + new_word[-1]
else:
new_word += letter
return new_word
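# Worked examples (these follow directly from the two functions above):
#   transform_asg("hello")       -> "hel1o"   # one extra 'l' encoded as "1"
#   transform_asg("kittty")      -> "kit2y"   # two extra 't's encoded as "2"
#   transform_asg_back("hel1o|") -> "hello"   # "|" markers are dropped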
def prepare_vocabs(path):
# read dictionary of words
with open(path, "r") as f:
words = f.readline().strip().split(" ")
words = [re.sub("[^a-z'.]+", "", word) for word in words]
known_words = set(list(map(lambda x: transform_asg(x) + "|", words))) - {""}
words.append("</s>")
known_words_original = set(words) - {""}
known_words_original = numpy.array(list(known_words_original))
return known_words, known_words_original
def prepare_vocabs_convlm(path):
# read dictionary of words
words = []
with open(path, "r") as f:
for line in f:
word = line.strip().split(" ")[0]
words.append(re.sub("[^a-z'.]+", "", word))
known_words = set(list(map(lambda x: transform_asg(x) + "|", words))) - {""}
words.append("</s>")
known_words_original = set(words) - {""}
known_words_original = numpy.array(list(known_words_original))
return known_words, known_words_original

wav2letter | wav2letter-main/recipes/lexicon_free/utilities/compute_upper_ppl_kenlm.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Compute upper limit on word perplexity for kenlm ngram models
Command : python3 compute_upper_ppl_kenlm.py --vocab_file [...] --kenlm_preds [...]
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import numpy
from utils import transform_asg
def compute_upper_limit_ppl_for_kenlm(known_words_file, kenlm_file):
with open(known_words_file, "r") as f:
known_words = set(list(map(transform_asg, f.readline().strip().split(" "))))
with open(kenlm_file, "r") as f:
sum_logp = 0
sum_logp_unk = 0
n_words = 0
n_words_unk = 0
n_letters = 0
for line in f:
if "Total" not in line:
continue
line = line.strip().split("\t")
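            # each tab-separated field of kenlm `query` output has the form
            # "<token>=<vocab id> <ngram order> <log10 prob>", e.g. "a=43 2 -1.234"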
word = ""
word_logp = 0
for token in line:
token_val = token.split("=")[0]
logp = float(token.split(" ")[-1])
if token_val == "|":
if word in known_words:
sum_logp += word_logp + numpy.log(numpy.power(10, logp))
n_words += 1
else:
sum_logp_unk += word_logp + numpy.log(numpy.power(10, logp))
n_words_unk += 1
word = ""
word_logp = 0
elif token_val == "</s>":
sum_logp += numpy.log(numpy.power(10, logp))
n_words += 1
else:
word += token_val
word_logp += numpy.log(numpy.power(10, logp))
n_letters += 1
if token_val == "</s>":
break
loss_letter = -(sum_logp + sum_logp_unk) / n_letters
ppl_word_no_unk = numpy.exp(-sum_logp / n_words)
ppl_word_unk = numpy.exp(-sum_logp_unk / n_words_unk)
ppl_word = numpy.exp(-(sum_logp + sum_logp_unk) / (n_words + n_words_unk))
print(
"Letter loss: {}, letter perplexity: {}".format(
loss_letter, numpy.exp(loss_letter)
)
)
print("Upper word perplexity for all words: {}".format(ppl_word))
print("Upper word perplexity for unknown words: {}".format(ppl_word_unk))
print(
"(Reported in the paper) "
"Upper word perplexity for known words: {}".format(ppl_word_no_unk)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Upper limit on word perplexity for kenlm predictions"
)
parser.add_argument(
"--vocab_file",
help="vocabulary of known words, use file "
"from --limit_vocab_file during word kenLM training.",
)
parser.add_argument(
"--kenlm_preds", help="file with kenlm predictions after query run"
)
args = parser.parse_args()
print("Evaluate file {}".format(args.kenlm_preds))
compute_upper_limit_ppl_for_kenlm(args.vocab_file, args.kenlm_preds)

wav2letter | wav2letter-main/recipes/lexicon_free/utilities/convlm_utils.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from fairseq import options
from fairseq.models.fconv import FConvDecoder
EOS = '</s>'
UNK = '<unk>'
EOSIDX = 2
UNKIDX = 3
def compute_new_state(model_state):
new_state = dict()
for key, val in model_state["model"].items():
if "1.weight" in key and "adaptive" in key:
new_state[
".".join(key.split(".")[1:]).replace("1.weight", "2.weight")
] = val
else:
new_state[".".join(key.split(".")[1:])] = val
return new_state
def load_char_model_20B(pytorch_model_path, fairseq_dict, dataset_type):
layer = eval(
"[(512, 5)] + [(128, 1, 0), (128, 5, 0), (256, 1, 3)] * 3 + "
"[(256, 1, 0), (256, 5, 0), (512, 1, 3)] * 3 + "
"[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3 + "
"[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 9 + "
"[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
)
model_state = torch.load(pytorch_model_path)
convLM_char = FConvDecoder(
fairseq_dict,
embed_dim=256,
out_embed_dim=256,
max_positions=1024,
convolutions=layer,
dropout=0.1,
share_embed=False,
attention=False,
positional_embeddings=False,
adaptive_softmax_cutoff=None,
adaptive_softmax_dropout=0,
).cuda()
convLM_char.load_state_dict(compute_new_state(model_state))
convLM_char.eval()
return convLM_char
def load_char_model_14B(pytorch_model_path, fairseq_dict, dataset_type):
layer = eval(
"[(512, 5)] + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3 + "
"[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3 + "
"[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6 + "
"[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
)
model_state = torch.load(pytorch_model_path)
convLM_char = FConvDecoder(
fairseq_dict,
embed_dim=128,
out_embed_dim=128,
max_positions=1024,
convolutions=layer,
dropout=0.1,
share_embed=False,
attention=False,
positional_embeddings=False,
adaptive_softmax_cutoff=None,
adaptive_softmax_dropout=0,
).cuda()
convLM_char.load_state_dict(compute_new_state(model_state))
convLM_char.eval()
return convLM_char
def load_word_model(pytorch_model_path, fairseq_dict, dataset_type):
layer = eval(
"[(512, 5)] + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3 + "
"[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3 + "
"[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6 + "
"[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
)
model_state = torch.load(pytorch_model_path)
if dataset_type == "wsj":
cutoff = "10000,50000,100000"
elif dataset_type == "ls":
cutoff = "10000,50000,200000"
else:
cutoff = ""
convLM = FConvDecoder(
fairseq_dict,
embed_dim=128,
out_embed_dim=128,
max_positions=1024,
convolutions=layer,
dropout=0.1,
share_embed=False,
attention=False,
positional_embeddings=False,
adaptive_softmax_cutoff=(options.eval_str_list(cutoff, type=int)),
adaptive_softmax_dropout=0,
).cuda()
convLM.load_state_dict(compute_new_state(model_state))
convLM.eval()
convLM.adaptive_softmax.eval()
return convLM
def decodeInputText(sentences, token_indices_dict):
sentences_decoded = []
for line in sentences:
sentences_decoded.append(
[
token_indices_dict[UNK]
if token not in token_indices_dict
else token_indices_dict[token]
for token in line.split(" ")
]
)
return sentences_decoded
def build_token_index_correspondence(dict_fname):
# follow fairseq
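    # fairseq reserves indices 0-3 for <s>, <pad>, </s>, <unk>
    # (hence EOSIDX=2, UNKIDX=3), so real dict entries start at index 4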
token_indices_dict = dict()
indices_token_dict = dict()
with open(dict_fname, "r") as f:
for index, line in enumerate(f):
token_indices_dict[line.strip().split(" ")[0]] = index + 4
indices_token_dict[index + 4] = line.strip().split(" ")[0]
token_indices_dict[EOS] = EOSIDX
indices_token_dict[EOSIDX] = EOS
token_indices_dict[UNK] = UNKIDX
indices_token_dict[UNKIDX] = UNK
return token_indices_dict, indices_token_dict

wav2letter | wav2letter-main/recipes/lexicon_free/utilities/compute_lower_ppl_convlm.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Compute upper and lower limits on word perplexity for convlm models
Command : python3 compute_lower_ppl_convlm.py --model [...] --dict [...] \
--word_model [...] --word_dict [...] \
--text [...] --model_type char14B --dataset_type ls
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import numpy
import torch
from convlm_utils import (
EOS,
EOSIDX,
UNKIDX,
build_token_index_correspondence,
decodeInputText,
load_char_model_14B,
load_char_model_20B,
load_word_model,
)
from fairseq.data import Dictionary
from utils import prepare_vocabs_convlm, transform_asg, transform_asg_back
# reusing previous states is, for some reason, slower than re-evaluating the full sentence.
# TODO speedup with batching and using previous state
def compute_word_logprob(model, current_state, target_word, token_index_dict):
if target_word == EOS:
x = torch.LongTensor(current_state).reshape(1, len(current_state)).cuda()
with torch.no_grad():
y = model.forward(x)[0]
logprobs = torch.nn.functional.log_softmax(y, 2).detach().cpu().numpy()[0]
return logprobs[-1, EOSIDX]
else:
additional_state = [token_index_dict[token] for token in list(target_word)]
with torch.no_grad():
x = (
torch.LongTensor(current_state + additional_state[:-1])
.reshape(1, len(current_state) + len(additional_state) - 1)
.cuda()
)
y = model.forward(x)[0]
logprobs = torch.nn.functional.log_softmax(y, 2).detach().cpu().numpy()[0]
return numpy.sum(
logprobs[-len(additional_state) :][
numpy.arange(len(additional_state)), additional_state
]
)
def compute_denominator(model, current_state, words, token_index_dict):
preds = [
compute_word_logprob(model, current_state, word, token_index_dict)
for word in words
]
assert len(preds) != 0, "Invalid denominator"
max_pred = numpy.max(preds)
return max_pred + numpy.log(numpy.sum(numpy.exp(preds - max_pred)))
def compute_words_model_pdf_mass(
word_probs, current_state_position, known_words, known_words_decoded
):
probs = word_probs[current_state_position, known_words_decoded]
indices = numpy.argsort(-probs)
    # the unk word is not added to this pdf mass since its probability is
    # sometimes huge; take the percentile over the known-word pdf instead
probs_sum = numpy.sum(probs)
top = numpy.where(numpy.cumsum(probs[indices]) > 0.95 * probs_sum)[0][0]
return [
transform_asg(w) + "|" if w != EOS else w for w in known_words[indices[:top]]
]
def compute_ppl_lower_limit(
model,
word_model,
sentences,
known_words,
known_words_original,
known_words_original_decoded,
indices_token_dict,
token_indices_dict,
):
n_words = 0
unk_n_words = 0
ppl = 0.0
ppl_lower = 0.0
n_logging = len(sentences)
for n, sentence in enumerate(sentences):
current_state = [EOSIDX]
current_word = ""
current_word_state_position = 0
addition_state = []
wordLM_sentence = (
"".join([indices_token_dict[idx] for idx in sentence])
.replace("|", " ")
.strip()
)
wordLM_sentence = [
transform_asg_back(word) for word in wordLM_sentence.split(" ")
]
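        # NOTE: word_indices_dict is read from module scope; it is built from
        # --word_dict in the __main__ block below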
wordLM_sentence_decoded = [EOSIDX] + [
UNKIDX if word not in word_indices_dict else word_indices_dict[word]
for word in wordLM_sentence
]
with torch.no_grad():
x = (
torch.LongTensor(wordLM_sentence_decoded)
.reshape(1, len(wordLM_sentence_decoded))
.cuda()
)
y = word_model.forward(x)[0]
words_probs = numpy.exp(
word_model.adaptive_softmax.get_log_prob(y, None)
.detach()
.cpu()
.numpy()[0]
)
for token_idx in sentence:
current_word += indices_token_dict[token_idx]
addition_state.append(token_idx)
if indices_token_dict[token_idx] == "|":
if current_word in known_words:
n_words += 1
pdf_mass_words = set(
compute_words_model_pdf_mass(
words_probs,
current_word_state_position,
known_words_original,
known_words_original_decoded,
)
)
if current_word not in pdf_mass_words:
pdf_mass_words.add(current_word)
word_score = compute_word_logprob(
model, current_state, current_word, token_indices_dict
)
ppl += word_score
ppl_lower += word_score - compute_denominator(
model, current_state, pdf_mass_words, token_indices_dict
)
else:
unk_n_words += 1
current_word = ""
current_state += addition_state
addition_state = []
current_word_state_position += 1
# process eos
word_score = compute_word_logprob(model, current_state, EOS, token_indices_dict)
n_words += 1
ppl += word_score
pdf_mass_words = set(
compute_words_model_pdf_mass(
words_probs,
current_word_state_position,
known_words_original,
known_words_original_decoded,
)
)
if EOS not in pdf_mass_words:
pdf_mass_words.add(EOS)
ppl_lower += word_score - compute_denominator(
model, current_state, pdf_mass_words, token_indices_dict
)
if n % 10 == 0:
print(
"Evaluated",
n,
"sentences among",
n_logging,
"upper limit perplexity",
numpy.exp(-ppl / n_words),
"lower limit perplexity",
numpy.exp(-ppl_lower / n_words),
"number of words",
n_words,
flush=True,
)
print("Final loss", ppl, "loss lower", ppl_lower)
print("Upper limit on perplexity:", numpy.exp(-ppl / n_words))
print("Lower limit on perplexity:", numpy.exp(-ppl_lower / n_words))
print("Total number of words:", n_words, "unknown words:", unk_n_words)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Upper and lower limits on word perplexity for convlm models"
)
parser.add_argument("--model", help="path to convlm model")
parser.add_argument("--word_model", help="path to convlm model")
parser.add_argument("--dict", help="path to convlm dict file in data")
parser.add_argument(
"--word_dict", help="path to convlm word convlm dict file in data"
)
parser.add_argument(
"--text", help="file to evaluate, in necessary format for model"
)
parser.add_argument("--model_type", help='"char14B" or "char20B"')
parser.add_argument("--dataset_type", help='"ls" or "wsj"', default="ls")
args = parser.parse_args()
print("Evaluate file {}".format(args.text))
token_indices_dict, indices_token_dict = build_token_index_correspondence(args.dict)
word_indices_dict, indices_word_dict = build_token_index_correspondence(
args.word_dict
)
known_words, known_words_original = prepare_vocabs_convlm(args.word_dict)
known_words_original_decoded = numpy.array(
[
UNKIDX if w not in word_indices_dict else word_indices_dict[w]
for w in known_words_original
]
)
with open(args.text, "r") as f:
sentences = [line.strip() for line in f]
input_data = decodeInputText(sentences, token_indices_dict)
fairseq_dict = Dictionary.load(args.dict)
word_fairseq_dict = Dictionary.load(args.word_dict)
word_model = load_word_model(args.word_model, word_fairseq_dict, args.dataset_type)
if "14B" in args.model_type:
char_model = load_char_model_14B(args.model, fairseq_dict, args.dataset_type)
else:
char_model = load_char_model_20B(args.model, fairseq_dict, args.dataset_type)
compute_ppl_lower_limit(
char_model,
word_model,
input_data,
known_words,
known_words_original,
known_words_original_decoded,
indices_token_dict,
token_indices_dict,
)

wav2letter | wav2letter-main/recipes/lexicon_free/wsj/prepare.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to prepare recipe to train/eval model on WSJ in wav2letter++ pipelines
Please install `sph2pipe` on your own -
see https://www.ldc.upenn.edu/language-resources/tools/sphere-conversion-tools \
with commands :
wget https://www.ldc.upenn.edu/sites/www.ldc.upenn.edu/files/ctools/sph2pipe_v2.5.tar.gz
tar -xzf sph2pipe_v2.5.tar.gz && cd sph2pipe_v2.5
gcc -o sph2pipe *.c -lm
Command : python3 prepare.py --wsj0 [...]/WSJ0/media \
    --wsj1 [...]/WSJ1/media --data_dst [...] --model_dst [...] \
    --sph2pipe [...]/sph2pipe_v2.5/sph2pipe
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import functools
import os
import re
import sys
from collections import defaultdict
import numpy
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(FILE_DIR, "../utilities"))
from utils import convert_words_to_letters_asg_rep2
def compare(x, y):
# sort by counts, if counts equal then sort in lex order
if x[1] > y[1]:
return -1
elif x[1] == y[1]:
if x[0] < y[0]:
return -1
else:
return 1
else:
return 1
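# e.g. sorted([("b", 2), ("a", 2), ("c", 5)], key=functools.cmp_to_key(compare))
# yields [("c", 5), ("a", 2), ("b", 2)]: descending counts, ties broken lexically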
def remap_words_with_same_spelling(data_dst, decoder_dst):
words_dict = defaultdict(int)
spellings_dict = defaultdict(set)
spellings_appearence_dict = defaultdict(int)
with open(os.path.join(data_dst, "lists/si284.lst"), "r") as flist:
for line in flist:
for word in line.strip().split(" ")[3:]:
word = re.sub(r"\(\S+\)", "", word) # not pronounced
words_dict[word] += 1
spelling = re.sub("[^a-z'.]+", "", word)
spellings_dict[spelling].update([word])
spellings_appearence_dict[spelling] += 1
with open(os.path.join(data_dst, "text/lm.txt"), "r") as flm:
for line in flm:
for word in line.strip().split(" "):
word = re.sub(r"\(\S+\)", "", word) # not pronounced
spelling = re.sub("[^a-z'.]+", "", word)
spellings_dict[spelling].update([word])
spellings_appearence_dict[spelling] += 1
sorted_spellings = sorted(
spellings_appearence_dict.items(), key=functools.cmp_to_key(compare)
)
special_mapping = {"al": "al-", "st": "st", "nd": "nd", "rd": "rd"}
remap_result = dict()
with open(os.path.join(decoder_dst, "dict-remap.txt"), "w") as fmap:
for spelling, _ in sorted_spellings:
words_count = {w: words_dict[w] for w in spellings_dict[spelling]}
sorted_words = sorted(
words_count.items(), key=functools.cmp_to_key(compare)
)
for word, _ in sorted_words:
remap_result[word] = (
sorted_words[0][0]
if spelling not in special_mapping
else special_mapping[spelling]
)
fmap.write("{} {}\n".format(word, remap_result[word]))
return remap_result
def get_spelling(word):
spelling = re.sub(r"\(\S+\)", "", word) # not pronounced
spelling = re.sub("[^a-z'.]+", "", spelling)
return spelling
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Librispeech Dataset creation.")
parser.add_argument("--wsj0", help="top level directory containing all WSJ0 discs")
parser.add_argument("--wsj1", help="top level directory containing all WSJ1 discs")
parser.add_argument(
"--data_dst", help="data destination directory", default="./wsj"
)
parser.add_argument(
"--model_dst",
help="model auxilary files destination directory",
default="./conv_glu_librispeech_char",
)
parser.add_argument(
"--wsj1_type",
help="if you are using larger corpus LDC94S13A, set parameter to `LDC94S13A`",
default="LDC94S13B",
)
parser.add_argument(
"--sph2pipe",
help="path to sph2pipe executable",
default="./sph2pipe_v2.5/sph2pipe",
)
parser.add_argument("--kenlm", help="location to installed kenlm directory")
parser.add_argument(
"-p", "--process", help="# of process for Multiprocessing", default=8, type=int
)
args = parser.parse_args()
os.system(
"python3 {}/../../../data/wsj/prepare.py "
"--wsj0 {} --wsj1 {} --sph2pipe {} --wsj1_type {} --dst {} -p {}".format(
os.path.dirname(os.path.abspath(__file__)),
args.wsj0,
args.wsj1,
args.sph2pipe,
args.wsj1_type,
args.data_dst,
args.process,
)
)
lists_path = os.path.join(args.data_dst, "lists")
am_path = os.path.join(args.model_dst, "am")
lm_data_path = os.path.join(args.data_dst, "text/lm.txt")
decoder_path = os.path.join(args.model_dst, "decoder")
os.makedirs(am_path, exist_ok=True)
os.makedirs(decoder_path, exist_ok=True)
# Generating am/*
print("Generating tokens.lst for acoustic model training", flush=True)
with open(os.path.join(am_path, "tokens.lst"), "w") as f_tokens:
f_tokens.write("|\n")
f_tokens.write("'\n")
f_tokens.write(".\n")
for alphabet in range(ord("a"), ord("z") + 1):
f_tokens.write(chr(alphabet) + "\n")
print(
"Generating lexicon.lst (word -> tokens) for acoustic model training",
flush=True,
)
    # generate a remapping for words:
    # among words with the same spelling, take the most frequent word
    # and use only that word in the lexicon;
    # apply the same substitution to the dev set during
    # acoustic model training for WER computation
remap_dict = remap_words_with_same_spelling(args.data_dst, decoder_path)
with open(os.path.join(lists_path, "si284.lst"), "r") as fin, open(
os.path.join(am_path, "si284.lst.remap"), "w"
) as fout:
for line in fin:
line = line.strip().split(" ")
for index in range(3, len(line)):
word = re.sub(r"\(\S+\)", "", line[index])
line[index] = remap_dict[word]
fout.write(" ".join(line) + "\n")
# words used in training/eval to prepare spelling
words_set = set()
for name in [
os.path.join(am_path, "si284.lst.remap"),
os.path.join(lists_path, "nov93dev.lst"),
]:
with open(name, "r") as flist:
for line in flist:
transcription = line.strip().split(" ")[3:]
words_set.update(transcription)
print(
"Writing lexicon file - {}...".format(
os.path.join(am_path, "lexicon_si284+nov93dev.txt")
),
flush=True,
)
with open(os.path.join(am_path, "lexicon_si284+nov93dev.txt"), "w") as f:
for word in words_set:
spelling = get_spelling(word)
assert re.match(
r"[a-z'.]+", spelling
), "invalid spelling for word '{}'".format(word)
f.write(
"{word}\t{tokens} |\n".format(
word=word, tokens=" ".join(list(spelling))
)
)
# Generating decoder/*
# prepare lexicon word -> tokens spelling
# write words to lexicon.txt file
print("Generating lexicon.txt (word -> tokens) for decoding", flush=True)
lex_file = os.path.join(decoder_path, "lexicon.lst")
print("Writing lexicon file - {}...".format(lex_file), flush=True)
with open(lex_file, "w") as f:
for word in numpy.unique(list(remap_dict.values())):
if len(re.findall(r"\d", word)) > 0:
continue
spelling = get_spelling(word)
if spelling != "":
if re.match("^[a-z'.]+$", spelling):
f.write("{w}\t{s} |\n".format(w=word, s=" ".join(spelling)))
else:
print('Ignore word "{}" in lexicon'.format(word))
# Prepare data for char lm training/evaluation
if os.path.exists(os.path.join(decoder_path, "char_lm_data.train")):
print(
"Skip generation of {}. Please remove the file to regenerate it".format(
os.path.join(decoder_path, "char_lm_data.train")
)
)
else:
convert_words_to_letters_asg_rep2(
os.path.join(args.data_dst, "text/lm.txt"),
os.path.join(decoder_path, "char_lm_data.train"),
)
convert_words_to_letters_asg_rep2(
os.path.join(args.data_dst, "text/nov93dev.txt"),
os.path.join(decoder_path, "char_lm_data.nov93dev"),
)
with open(os.path.join(args.data_dst, "text/nov93dev.txt"), "r") as f, \
open(os.path.join(decoder_path, "word_lm_data.nov93dev"), "w") as fout:
for line in f:
result = []
for word in line.strip().split(" "):
word = re.sub("[^a-z'.]+", "", word)
if word != "":
result.append(word)
fout.write(" ".join(result) + "\n")
print("Done!", flush=True)

wav2letter | wav2letter-main/recipes/lexicon_free/librispeech/prepare.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to prepare recipe to train/eval model on Librispeech in wav2letter++ pipelines
Command : python3 prepare.py --data_dst [...] --model_dst [...] --kenlm [...]/kenlm/
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import sys
from collections import defaultdict
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(FILE_DIR, "../utilities"))
from utils import convert_words_to_letters_asg_rep2
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Librispeech Dataset creation.")
parser.add_argument(
"--data_dst", help="data destination directory", default="./librispeech"
)
parser.add_argument(
"--model_dst",
help="model auxilary files destination directory",
default="./conv_glu_librispeech_char",
)
parser.add_argument("--kenlm", help="location to installed kenlm directory")
parser.add_argument(
"-p", "--process", help="# of process for Multiprocessing", default=8, type=int
)
args = parser.parse_args()
os.system(
"python3 {}/../../../data/librispeech/prepare.py --dst {} -p {}".format(
os.path.dirname(os.path.abspath(__file__)), args.data_dst, args.process
)
)
subpaths = {
"train": ["train-clean-100", "train-clean-360", "train-other-500"],
"dev": ["dev-clean", "dev-other"],
"test": ["test-clean", "test-other"],
}
lists_path = os.path.join(args.data_dst, "lists")
am_path = os.path.join(args.model_dst, "am")
decoder_path = os.path.join(args.model_dst, "decoder")
os.makedirs(am_path, exist_ok=True)
os.makedirs(decoder_path, exist_ok=True)
# Generating am/*
print("Generating tokens.lst for acoustic model training", flush=True)
with open(os.path.join(am_path, "tokens.lst"), "w") as fout:
fout.write("|\n")
fout.write("'\n")
for alphabet in range(ord("a"), ord("z") + 1):
fout.write(chr(alphabet) + "\n")
print(
"Generating lexicon.lst (word -> tokens) for acoustic model training",
flush=True,
)
word_dict = defaultdict(set)
for key, names in subpaths.items():
for name in names:
with open(os.path.join(lists_path, name + ".lst"), "r") as flist:
for line in flist:
transcription = line.strip().split(" ")[3:]
word_dict[key].update(transcription)
lexicon_words = sorted(word_dict["train"] | word_dict["dev"])
with open(os.path.join(am_path, "lexicon_train+dev.lst"), "w") as f:
for word in lexicon_words:
f.write(
"{word}\t{tokens} |\n".format(word=word, tokens=" ".join(list(word)))
)
# Prepare data for char lm training/evaluation
if os.path.exists(os.path.join(decoder_path, "char_lm_data.train")):
print(
"Skip generation of {}. Please remove the file to regenerate it".format(
os.path.join(decoder_path, "char_lm_data.train")
)
)
else:
convert_words_to_letters_asg_rep2(
os.path.join(args.data_dst, "text/librispeech-lm-norm.txt.lower.shuffle"),
os.path.join(decoder_path, "char_lm_data.train"),
)
convert_words_to_letters_asg_rep2(
os.path.join(args.data_dst, "text/dev-clean.txt"),
os.path.join(decoder_path, "char_lm_data.dev-clean"),
)
convert_words_to_letters_asg_rep2(
os.path.join(args.data_dst, "text/dev-other.txt"),
os.path.join(decoder_path, "char_lm_data.dev-other"),
)
# Download official 4gram model and its lexicon
cmd = [
"python3 {}/../../utilities/prepare_librispeech_official_lm.py",
"--dst {}",
"--kenlm {}",
]
os.system(
" ".join(cmd).format(
os.path.dirname(os.path.abspath(__file__)), decoder_path, args.kenlm
)
)
additional_set = {
"bennydeck",
"fibi",
"moling",
"balvastro",
"hennerberg",
"ambrosch",
"quilter's",
"yokul",
"recuperations",
"dowle",
"buzzer's",
"tarrinzeau",
"bozzle's",
"riverlike",
"vendhya",
"sprucewood",
"macklewain",
"macklewain's",
"khosala",
"derivatively",
"gardar",
"untrussing",
"rathskellers",
"telemetering",
"drouet's",
"sneffels",
"glenarvan's",
"congal's",
"d'avrigny",
"rangitata",
"wahiti",
"presty",
"quinci",
"troke",
"westmere",
"saknussemm",
"dhourra",
"irolg",
"bozzle",
"boolooroo",
"collander",
"finnacta",
"canyou",
"myrdals",
"shimerdas",
"impara",
"synesius's",
"brandd",
"bennydeck's",
"weiser",
"noirtier",
"verloc",
"shimerda",
"sudvestr",
"frierson's",
"bergez",
"gwynplaine's",
"breadhouse",
"mulrady",
"shampooer",
"ossipon",
"shoplets",
"delectasti",
"herbivore",
"lacquey's",
"pinkies",
"theosophies",
"razetta",
"magazzino",
"yundt",
"testbridge",
"officinale",
"burgoynes",
"novatians",
"sandyseal",
"chaba",
"beenie",
"congal",
"doma",
"brau",
"mainhall",
"verloc's",
"zingiber",
"vinos",
"bush'",
"yulka",
"bambeday",
"darfhulva",
"olbinett",
"gingle",
"nicless",
"stupirti",
"ossipon's",
"skint",
"ruggedo's",
"tishimingo",
"ganny",
"delaunay's",
"tumble's",
"birdikins",
"hardwigg",
"homoiousios",
"docetes",
"daguerreotypist",
"satisfier",
"heuchera",
"parrishes",
"homoousios",
"trampe",
"bhunda",
"brion's",
"fjordungr",
"hurstwood",
"corncakes",
"abalone's",
"libano",
"scheiler",
}
with open(os.path.join(decoder_path, "lexicon.txt"), "a") as flex:
for word in additional_set:
flex.write("{}\t{}\n".format(word, " ".join(list(word)) + " |"))
os.rename(
os.path.join(decoder_path, "lexicon.txt"),
os.path.join(decoder_path, "lexicon.lst"),
)
# prepare oov and in vocabulary samples lists
decoder_lexicon_words = []
with open(os.path.join(decoder_path, "lexicon.lst"), "r") as flex:
for line in flex:
decoder_lexicon_words.append(line.strip().split("\t")[0])
decoder_lexicon_words = set(decoder_lexicon_words)
for list_name in ["test-clean.lst", "test-other.lst"]:
with open(os.path.join(lists_path, list_name), "r") as flist, open(
os.path.join(decoder_path, list_name + ".oov"), "w"
) as foov, open(os.path.join(decoder_path, list_name + ".inv"), "w") as finv:
for line in flist:
sample_words = set(line.strip().split(" ")[3:])
if len(sample_words - decoder_lexicon_words) > 0:
foov.write(line)
else:
finv.write(line)
print("Done!", flush=True)

wav2letter | wav2letter-main/recipes/utilities/prepare_librispeech_official_lm.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Downloading and preparation of official Librispeech 4-gram language model.
Please install `kenlm` on your own - https://github.com/kpu/kenlm
Command : python3 prepare_librispeech_official_lm.py --dst [...] --kenlm [...]/kenlm/
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import re
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Librispeech official lm creation.")
parser.add_argument(
"--dst", help="data destination directory", default="./decoder"
)
parser.add_argument("--kenlm", help="location to installed kenlm directory")
args = parser.parse_args()
decoder_path = args.dst
os.makedirs(decoder_path, exist_ok=True)
# Generating decoder/*
lm = "4-gram"
assert os.path.isdir(str(args.kenlm)), "kenlm directory not found - '{d}'".format(
d=args.kenlm
)
print("Downloading Librispeech official LM model...\n", flush=True)
arpa_file = os.path.join(decoder_path, lm + ".arpa")
if not os.path.exists(arpa_file):
os.system(
"wget -c -O - http://www.openslr.org/resources/11/{lm}.arpa.gz | "
"gunzip -c > {fout}".format(lm=lm, fout=arpa_file)
)
else:
print("Arpa file {} exist, skip its downloading.".format(arpa_file))
# temporary arpa file in lowercase
print("Saving ARPA LM file in binary format ...\n", flush=True)
os.system(
"cat {arpa} | tr '[:upper:]' '[:lower:]' > {arpa}.tmp".format(arpa=arpa_file)
)
binary = os.path.join(args.kenlm, "build", "bin", "build_binary")
os.system(
"{bin} {farpa}.tmp {fbin}".format(
bin=binary, farpa=arpa_file, fbin=arpa_file.replace(".arpa", ".bin")
)
)
os.remove(os.path.join(arpa_file + ".tmp"))
# prepare lexicon word -> tokens spelling
# write words to lexicon.txt file
lex_file = os.path.join(decoder_path, "lexicon.txt")
print("Writing Lexicon file - {}...".format(lex_file))
with open(lex_file, "w") as f:
# get all the words in the arpa file
with open(arpa_file, "r") as arpa:
for line in arpa:
# verify if the line corresponds to unigram
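                # ARPA unigram lines look like "-1.234567\tword\t-0.345678"
                # (log10 probability, word, optional backoff weight)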
if not re.match(r"[-]*[0-9\.]+\t\S+\t*[-]*[0-9\.]*$", line):
continue
word = line.split("\t")[1]
word = word.strip().lower()
if word == "<unk>" or word == "<s>" or word == "</s>":
continue
assert re.match("^[a-z']+$", word), "invalid word - {w}".format(w=word)
f.write("{w}\t{s} |\n".format(w=word, s=" ".join(word)))
print("Done!", flush=True)

wav2letter | wav2letter-main/recipes/utilities/prepare_librispeech_wp_and_official_lexicon.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to prepare recipe to train/eval model on Librispeech in wav2letter++ pipelines
Command : python3 prepare_librispeech_wp_and_official_lexicon.py --data_dst [...] \
          --model_dst [...] --wp 10000 --nbest 10
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import re
from collections import defaultdict
import sentencepiece as spm
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Librispeech Dataset creation.")
parser.add_argument(
"--data_dst", help="data destination directory", default="./librispeech"
)
parser.add_argument(
"--model_dst",
help="model auxilary files destination directory",
default="./model",
)
parser.add_argument(
"-p", "--process", help="# of process for Multiprocessing", default=8, type=int
)
parser.add_argument("--wp", help="number of word pieces", default=10000, type=int)
parser.add_argument(
"--nbest",
help="number of best segmentations for each word (or numbers comma separated)",
default="10",
)
args = parser.parse_args()
os.system(
"python3 {}/../../../data/librispeech/prepare.py --dst {} -p {}".format(
os.path.dirname(os.path.abspath(__file__)), args.data_dst, args.process
)
)
subpaths = {
"train": ["train-clean-100", "train-clean-360", "train-other-500"],
"dev": ["dev-clean", "dev-other"],
"test": ["test-clean", "test-other"],
}
lists_path = os.path.join(args.data_dst, "lists")
am_path = os.path.join(args.model_dst, "am")
decoder_path = os.path.join(args.model_dst, "decoder")
os.makedirs(am_path, exist_ok=True)
os.makedirs(decoder_path, exist_ok=True)
# Generating am/*
num_wordpieces = args.wp
train_all_text = os.path.join(am_path, "train.txt")
prefix = "librispeech-train-all-unigram-{}".format(num_wordpieces)
prefix = os.path.join(am_path, prefix)
vocab_name = prefix + ".vocab"
model_name = prefix + ".model"
# prepare data
print("Preparing tokens and lexicon for acoustic model...\n", flush=True)
word_dict = defaultdict(set)
with open(train_all_text, "w") as ftext:
for key, names in subpaths.items():
for name in names:
with open(os.path.join(lists_path, name + ".lst"), "r") as flist:
for line in flist:
transcription = line.strip().split(" ")[3:]
if key == "train":
ftext.write(" ".join(transcription) + "\n")
word_dict[key].update(transcription)
lexicon_words_train = sorted(word_dict["train"])
lexicon_words = sorted(word_dict["train"] | word_dict["dev"])
# train
print("Computing word pieces...\n", flush=True)
train_cmd = (
"--input={input} --model_prefix={prefix} --vocab_size={sz}"
" --character_coverage=1.0 --model_type=unigram"
" --split_by_unicode_script=false".format(
input=train_all_text, prefix=prefix, sz=num_wordpieces
)
)
spm.SentencePieceTrainer.Train(train_cmd)
# word piece dictionary
print("Creating word piece list...\n", flush=True)
exclude_list = {"<unk>", "<s>", "</s>"}
with open(vocab_name.replace(".vocab", ".tokens"), "w") as fvocab_filt:
with open(vocab_name, "r", encoding="utf-8") as fvocab:
for line in fvocab:
val, _ = line.strip().split("\t", 1)
if val not in exclude_list:
fvocab_filt.write(val.replace("\u2581", "_") + "\n")
# Generating decoder/*
lm = "4-gram"
print("Downloading Librispeech official LM model...\n", flush=True)
arpa_file = os.path.join(decoder_path, lm + ".arpa")
if not os.path.exists(arpa_file):
os.system(
"wget -c -O - http://www.openslr.org/resources/11/{lm}.arpa.gz | "
"gunzip -c > {fout}".format(lm=lm, fout=arpa_file)
)
else:
print("Arpa file {} exist, skip its downloading.".format(arpa_file))
# temporary arpa file in lowercase
os.system(
"cat {arpa} | tr '[:upper:]' '[:lower:]' > {arpa}.lower".format(arpa=arpa_file)
)
lm_words = []
with open(arpa_file + ".lower", "r") as arpa:
for line in arpa:
# verify if the line corresponds to unigram
if not re.match(r"[-]*[0-9\.]+\t\S+\t*[-]*[0-9\.]*$", line):
continue
word = line.split("\t")[1]
word = word.strip().lower()
if word == "<unk>" or word == "<s>" or word == "</s>":
continue
assert re.match("^[a-z']+$", word), "invalid word - {w}".format(w=word)
lm_words.append(word)
# word -> word piece lexicon for loading targets
print("Creating word -> word pieces lexicon...\n", flush=True)
sp = spm.SentencePieceProcessor()
sp.Load(model_name)
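    # NBestEncodeAsPieces(word, n) returns the n most likely segmentations of
    # `word` under the unigram model, each as a list of piece strings; the
    # sentencepiece boundary marker "\u2581" is rewritten to "_" below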
for nbest in args.nbest.split(","):
nbest = int(nbest)
lexicon_name = "librispeech-train+dev-unigram-{sz}-nbest{n}.lexicon".format(
sz=num_wordpieces, n=nbest
)
lexicon_name_train = "librispeech-train-unigram-{sz}-nbest{n}.lexicon".format(
sz=num_wordpieces, n=nbest
)
with open(os.path.join(am_path, lexicon_name), "w") as f_lexicon, open(
os.path.join(am_path, lexicon_name_train), "w"
) as f_lexicon_train:
for word in lexicon_words:
wps = sp.NBestEncodeAsPieces(word, nbest)
for wp in wps: # the order matters for our training
f_lexicon.write(
word
+ "\t"
+ " ".join([w.replace("\u2581", "_") for w in wp])
+ "\n"
)
if word in lexicon_words_train:
f_lexicon_train.write(
word
+ "\t"
+ " ".join([w.replace("\u2581", "_") for w in wp])
+ "\n"
)
nbest = int(nbest)
decoder_lexicon_name = "decoder-unigram-{sz}-nbest{n}.lexicon".format(
sz=num_wordpieces, n=nbest
)
with open(os.path.join(decoder_path, decoder_lexicon_name), "w") as f_lexicon:
for word in lm_words:
wps = sp.NBestEncodeAsPieces(word, nbest)
for wp in wps: # the order matters for our training
f_lexicon.write(
word
+ "\t"
+ " ".join([w.replace("\u2581", "_") for w in wp])
+ "\n"
)
print("Done!", flush=True)

wav2letter | wav2letter-main/recipes/utilities/convlm_serializer/save_pytorch_model.py

from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from collections import defaultdict
import torch
def convert(model_state, key, suffix=""):
string = ""
param = model_state[key]
# param name
string += ".".join(key.split(".")[1:-1]) + suffix + "." + key.split(".")[-1] + " "
change_to_lin_layer = False
if "conv" in key and len(param.shape) == 3:
if ("weight_v" in key and param.shape[0] == 1) or (
"weight_g" in key
and model_state[key.replace("weight_g", "weight_v")].shape[0] == 1
):
change_to_lin_layer = True
if change_to_lin_layer:
# param shapes
string += (
str(len(param.shape) - 1) + " " + " ".join(map(str, param.shape[1:][::-1]))
)
# param matrix
string += " " + " ".join(map(str, param.cpu().numpy()[0].T.flatten()))
else:
# param shapes
string += str(len(param.shape)) + " " + " ".join(map(str, param.shape))
# param matrix
string += " " + " ".join(map(str, param.cpu().numpy().flatten()))
return string
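# Each emitted line is "<name><suffix> <ndims> <dims...> <flattened values...>".
# Weight-normalized 1x1 convolutions (leading dimension 1) are re-serialized as
# linear layers: the singleton dimension is dropped and the matrix transposed.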
def save_model(pytorch_model_path, dst):
model_state = torch.load(pytorch_model_path)
model_state = model_state["model"]
add_string = ""
prev_key = ""
with open(dst, "w") as f:
projections = defaultdict(list)
for key in model_state:
print("Process param", key)
if "version" in key:
print("Skip", key)
continue
if "projection" in key:
projections[key.split(".")[-2]].append(
convert(model_state, key, "-projection")
)
else:
if prev_key != key.split(".")[2]:
if add_string != "":
f.write(add_string + "\n")
add_string = ""
prev_key = key.split(".")[2]
if key.split(".")[2] in projections:
add_string = "\n".join(projections[key.split(".")[2]])
f.write(convert(model_state, key) + "\n")
if __name__ == "__main__":
print("Converting the model. Usage: save_pytorch_model.py [path/to/model] [dst]")
path = sys.argv[1]
dst = sys.argv[2]
save_model(path, dst)

wav2letter | wav2letter-main/recipes/joint_training_vox_populi/prepare_data/common_voice_to_wav2letter.py

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import argparse
import torch
import torchaudio
import string
from tqdm import tqdm
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
from lst_utils import FileInfo, save_lst, get_speakers_list
PUNCTUATION = (string.punctuation + "¡¿").replace("'", "").replace("-", "")
PUNCTUATION += "–…»“«·—’”„"
def get_size_audio_file(path_file: Path) -> float:
r"""
    Return the size of the given audio file, in hours
"""
try:
info = torchaudio.info(str(path_file))[0]
except RuntimeError:
return 0
return info.length / (info.rate * 3600)
def to_wav2letterFormat(data: torch.tensor, sr: int) -> torch.tensor:
r"""
Wav2letter needs mono 16kHz inputs
"""
if len(data.size()) == 2:
data = data.mean(dim=0, keepdim=True)
elif len(data.size()) == 1:
data = data.view(1, -1)
else:
raise ValueError("Invalid tensor format")
if sr != 16000:
data = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(data)
data = torch.clamp(data, min=-1.0, max=1.0)
return data
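# e.g. (hypothetical clip) stereo 44.1 kHz in, mono 16 kHz out, clamped to [-1, 1]:
#   audio, sr = torchaudio.load("clip.mp3")
#   audio = to_wav2letterFormat(audio, sr)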
def get_base_data_from_csv(pathTSV) -> List[Dict[str, str]]:
out = []
with open(pathTSV, "r", encoding="utf-8") as tsvfile:
reader = csv.DictReader(tsvfile, dialect="excel-tab")
for row in reader:
speaker_id = row["client_id"]
name = row["path"]
text = row["sentence"]
out.append({"speaker_id": speaker_id, "local_path": name, "text": text})
return out
def norm_text(
text: str,
char_set: Set[str],
replace_set: Optional[Dict[str, str]] = None,
del_set: Optional[Set[str]] = None,
) -> Tuple[bool, str]:
text = text.lower()
if replace_set is not None:
for char_, val in replace_set.items():
text = text.replace(char_, val)
if del_set is not None:
for char_ in del_set:
text = text.replace(char_, "")
valid = True
for char_ in text.replace(" ", ""):
if char_ not in char_set:
valid = False
break
return text, valid
def load_letters(path_letter: Path):
with open(path_letter, "r") as file:
data = file.readlines()
return [x.strip() for x in data]
def get_full_audio_data(
path_dir_audio: Path,
base_data: List[Dict[str, str]],
char_set: Set[str],
replace_set: Optional[Dict[str, str]] = None,
del_set: Optional[Set[str]] = None,
file_extension: str = None,
) -> List[FileInfo]:
output = []
for audio_data in tqdm(base_data, total=len(base_data)):
path_audio = path_dir_audio / audio_data["local_path"]
if file_extension is not None:
path_audio = path_audio.with_suffix(file_extension)
if not path_audio.is_file():
continue
size_sec = get_size_audio_file(path_audio)
text, status = norm_text(
audio_data["text"], char_set, replace_set=replace_set, del_set=del_set
)
output.append(
FileInfo(
size=size_sec,
path_=path_audio,
id_=path_audio.stem,
text=text,
speaker=audio_data["speaker_id"],
)
)
print(f"{len(output)} files found out of {len(base_data)}")
return output
def convert_audio_data(
input_list: List[FileInfo], out_dir_audio: Path
) -> List[FileInfo]:
out_dir_audio.mkdir(exist_ok=True)
output = []
for file_info in tqdm(input_list, total=len(input_list)):
audio, sr = torchaudio.load(str(file_info.path_))
audio = to_wav2letterFormat(audio, sr)
path_out = (out_dir_audio / file_info.path_.name).with_suffix(".flac")
torchaudio.save(str(path_out), audio, 16000)
output.append(
FileInfo(
size=file_info.size,
path_=path_out,
id_=file_info.id_,
text=file_info.text,
speaker=file_info.speaker,
)
)
return output
def load_filter(path_filter: Path) -> List[str]:
with open(path_filter, "r") as f:
return [x.strip() for x in f.readlines()]
def filter_data_by_id(input_lst: List[FileInfo], to_filter: List[str]):
input_lst.sort(key=lambda x: x.id_)
to_filter.sort()
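    # both lists are sorted, so one linear two-pointer sweep matches ids:
    # advance the filter cursor until it reaches or passes the current id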
index_filter = 0
len_filter = len(to_filter)
out = []
for lst_data in input_lst:
id_ = lst_data.id_
while index_filter < len_filter and to_filter[index_filter] < id_:
index_filter += 1
if index_filter >= len_filter:
break
if to_filter[index_filter] == id_:
out.append(lst_data)
print(f"{len(out)} files out of {len(to_filter)}")
return out
def main(args):
letters = load_letters(Path(args.path_tokens))
data = get_base_data_from_csv(Path(args.path_tsv))
audio_data = get_full_audio_data(
Path(args.path_audio),
data,
char_set=set(letters),
del_set=PUNCTUATION,
file_extension=args.file_extension,
)
if args.path_filter is not None:
filter_ids = load_filter(Path(args.path_filter))
audio_data = filter_data_by_id(audio_data, filter_ids)
if args.path_conversion is not None:
audio_data = convert_audio_data(audio_data, Path(args.path_conversion))
speakers = get_speakers_list(audio_data)
print(f"{len(speakers)} speakers found")
save_lst(audio_data, args.path_output)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Build the lst input files for common voices datasets"
)
parser.add_argument(
"--path_tsv",
type=str,
default="/private/home/mriviere/Common_voices/en/dev.tsv",
help="Path to the target tsv file",
)
parser.add_argument(
"--path_audio",
type=str,
default="/private/home/mriviere/Common_voices/en/clips_16k",
help="Path to the directory containing the audio data",
)
parser.add_argument(
"--path_output",
type=str,
required=True,
help="Output lst file.",
)
parser.add_argument(
"--path_tokens",
type=str,
default="/checkpoint/mriviere/VoxPopuli/segmentation_output/en/en_grapheme.tokens",
help="Path to the token file",
)
parser.add_argument(
"--path_filter",
type=str,
default=None,
help="If given, path to a file containing the files ids to keep.",
)
parser.add_argument(
"--path_conversion",
type=str,
default=None,
help="If given, path to a directory where the audio should be converted",
)
parser.add_argument("--file_extension", type=str, default=".mp3")
args = parser.parse_args()
main(args)

wav2letter | wav2letter-main/recipes/joint_training_vox_populi/prepare_data/audio.py (empty file)

wav2letter | wav2letter-main/recipes/joint_training_vox_populi/prepare_data/lst_utils.py

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import List, Optional, Set
from dataclasses import dataclass
@dataclass
class FileInfo:
id_: str
path_: Path
size: float
text: str
wer: Optional[str] = None
ler: Optional[str] = None
speaker: Optional[str] = None
def save_lst(lst_data: List[FileInfo], path_out: Path) -> None:
with open(path_out, "w") as file:
for data in lst_data:
file.write(f"{data.id_} {data.path_} {data.size*3600 * 1000} {data.text}\n")
def load_lst(path_file: Path) -> List[FileInfo]:
with open(path_file, "r") as file:
data = [x.strip() for x in file.readlines()]
out = []
for line in data:
tab = line.split()
id_, path_, size = tab[:3]
text = " ".join(tab[3:])
out.append(FileInfo(id_, path_, float(size) / 3600 / 1000, text))
return out
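# Round-trip sketch (made-up values): save_lst emits one sample per line as
# "<id> <path> <duration_ms> <text>"; size is held in hours on FileInfo and
# converted to milliseconds on disk, and load_lst inverts the conversion.
#   info = FileInfo(id_="utt1", path_=Path("a.flac"), size=1.0, text="hello")
#   save_lst([info], Path("train.lst"))
#   assert load_lst(Path("train.lst"))[0].id_ == "utt1"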
def get_speakers_list(files_data: List[FileInfo]) -> Set[str]:
return {x.speaker for x in files_data}

wav2letter-main/recipes/joint_training_vox_populi/prepare_data/make_lexicon.py

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
from typing import Dict, Optional, Set
log = logging.getLogger(__name__)
def has_valid_tokens(word: str, tokens: Set[str]) -> bool:
for c in word:
if c not in tokens:
return False
return True
def read_token_file(path_token_file: Path, eow_char: str) -> Set[str]:
with path_token_file.open("r") as file:
data = [x.strip() for x in file.readlines()]
return {x for x in data if x != eow_char}
def save_lexicon(
lexicon: Set[str], path_out: Path, eow_char: str, tokens: Set[str]
) -> None:
list_lexicon = list(lexicon)
list_lexicon.sort()
with path_out.open("w") as file:
for word in list_lexicon:
if has_valid_tokens(word, tokens):
split_word = " ".join(list(word)) + " " + eow_char + " "
file.write(f"{word} {split_word}\n")
def load_lexicon(path_lexicon: Path) -> Dict[str, str]:
with open(path_lexicon, "r") as file:
data = [x.strip() for x in file.readlines()]
out = {}
for line in data:
        tokens = line.split()
        word = tokens[0]
        spelling = " ".join(tokens[1:])
out[word] = spelling
return out
def load_words_from_lst(path_lst: Path, n_best: int, min_occ: int, is_raw_text: bool):
"""
Load words from an input file, which can be in w2l list format or
a file with lines of sentences.
    path_lst: input file
    n_best: keep only the n_best most frequent words
    min_occ: minimum number of occurrences for a word to be kept
    is_raw_text: True if the input file contains plain lines of text;
        False if it is in w2l list format (utterance id, audio path,
        duration, transcription)
"""
with path_lst.open("r") as file_lst:
data = [x.strip() for x in file_lst.readlines()]
log.info("Building the lexicon")
out = {}
# id_ path duration normalized_text
for line in data:
if is_raw_text:
words = line.split()
else:
words = line.split()[3:]
for word in words:
if word not in out:
out[word] = 0
out[word] += 1
tmp = list(out.items())
tmp = [(k, v) for k, v in tmp if v >= min_occ]
tmp.sort(reverse=True, key=lambda x: x[1])
return {x for x, v in tmp[:n_best]}
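# Sketch of the two accepted input formats (made-up lines):
#   raw text : "the cat sat"                      -> words taken from column 0
#   w2l list : "utt1 /a.flac 1200.0 the cat sat"  -> words taken from column 3 on
# Only the n_best most frequent words with at least min_occ occurrences survive.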
def lexicon_from_lst(
path_lst: Path,
path_tokens: Path,
eow_char: str,
path_out: Path,
path_old_lexicon: Optional[Path] = None,
n_best: int = 5000,
min_occ: int = 10,
is_raw_text: bool = False,
) -> None:
out_lexicon = set()
tokens = read_token_file(path_tokens, eow_char)
log.info("Token file loaded")
out_lexicon = load_words_from_lst(path_lst, n_best, min_occ, is_raw_text)
if path_old_lexicon is not None:
old_lexicon = load_lexicon(path_old_lexicon)
out_lexicon |= old_lexicon.keys()
log.info(f"Saving the lexicon at {path_out}")
save_lexicon(out_lexicon, path_out, eow_char, tokens)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Build a lexicon from the given .lst file")
parser.add_argument(
"-i", "--input_lst", type=str, required=True, help="Path to the input lst file"
)
parser.add_argument(
"--tokens", type=str, required=True, help="Path to the token file"
)
parser.add_argument(
"--max_size_lexicon",
type=int,
help="Number of words to retain.",
default=10000,
)
parser.add_argument(
"--min_occ",
type=int,
help="Number of words to retain.",
default=0,
)
parser.add_argument(
"-o", "--output", type=str, required=True, help="Path to the output file."
)
parser.add_argument("--eow_token", type=str, default="|", help="End of word token.")
parser.add_argument(
"--old_lexicon",
type=str,
default=None,
help="Add the given lexicon to the output file",
)
parser.add_argument(
"--raw_text", action="store_true", help="input is raw text instead of w2l list"
)
args = parser.parse_args()
path_old_lexicon = Path(args.old_lexicon) if args.old_lexicon is not None else None
lexicon_from_lst(
Path(args.input_lst),
Path(args.tokens),
args.eow_token,
Path(args.output),
path_old_lexicon=path_old_lexicon,
n_best=args.max_size_lexicon,
min_occ=args.min_occ,
is_raw_text=args.raw_text,
)

wav2letter-main/recipes/joint_training_vox_populi/prepare_data/get_tokens.py

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from common_voice_to_wav2letter import get_base_data_from_csv, PUNCTUATION
from typing import List, Set
import argparse
REPLACE_SET = {"–": "-", "`": "'", "’": "'"}
def get_tokens_from_str(str_in) -> Set[str]:
for c, val in REPLACE_SET.items():
str_in = str_in.replace(c, val)
    return {x for x in str_in.lower().replace(" ", "")}
def get_tokens_from_str_list(list_str: List[str]) -> Set[str]:
out = set()
for str_in in list_str:
out = out.union(get_tokens_from_str(str_in))
return out
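# Minimal sketch (made-up strings): characters are lowercased, REPLACE_SET
# substitutions are applied, and spaces are dropped before building the set.
#   get_tokens_from_str_list(["Ab'c", "de"]) == {"a", "b", "'", "c", "d", "e"}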
def save_tokens(tokens, path_out, eow_token="|") -> None:
with open(path_out, "w") as f:
for x in tokens:
f.write(x + "\n")
f.write(eow_token)
def main(args):
data = get_base_data_from_csv(args.input_csv)
all_tokens = get_tokens_from_str_list([x["text"] for x in data])
remove_tokens = PUNCTUATION + "…»"
remove_tokens += "1234567890–"
all_tokens = all_tokens.difference({x for x in remove_tokens})
save_tokens(all_tokens, args.output)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Token builder")
parser.add_argument("input_csv")
parser.add_argument("output")
main(parser.parse_args())

wav2letter-main/recipes/self_training/librispeech/lm/prepare_wp_data.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to prepare word-piece data for lm training
Command : python3 prepare.py --data_src [...] --model_src [...]
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import sentencepiece as spm
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="LM data preparation.")
parser.add_argument("--data_src", help="librispeech data")
parser.add_argument("--model_src", help="model auxilary files directory")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(
os.path.join(args.model_src, "am", "librispeech-train-all-unigram-10000.model")
)
for name, suffix in zip(
["librispeech-lm-norm.txt.lower.shuffle", "dev-clean.txt", "dev-other.txt"],
["train", "dev-clean", "dev-other"],
):
with open(os.path.join(args.data_src, "text", name), "r") as fin, open(
os.path.join(args.model_src, "decoder/lm_wp_10k." + suffix), "w"
) as fout:
for line in fin:
result = ""
for word in line.strip().split(" "):
wps = sp.NBestEncodeAsPieces(word, 1)[0]
result += " ".join([w.replace("\u2581", "_") for w in wps]) + " "
fout.write("{}\n".format(result.strip()))

wav2letter-main/recipes/self_training/librispeech/lm/filter_contractions.py

from __future__ import print_function
import os
import sys
from multiprocessing.pool import ThreadPool
CONTRACTIONS = "contractions.txt"
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def run_for_id(file_name):
print("Starting thread")
contractions = []
with open(CONTRACTIONS, "r") as c:
for line in c:
contractions.append(line.strip())
print("Parsing input file")
lines = []
with open(file_name, "r") as f:
for line in f:
lines.append(line)
print("Done parsing input file")
# with open(file_name + ".filtered", "w") as f:
filtered_lines = []
counter = 0
for line in lines:
counter += 1
if counter % 10000 == 0:
print("Counter at " + str(counter))
filtered_words = []
for word in line.strip().split(" "):
word = word.strip()
# Take care of cases like "'you'd" or "can't'"
if word[1:] in contractions:
filtered_words.append(word[1:])
elif word[:-1] in contractions:
filtered_words.append(word[:-1])
elif word in contractions or "'s" in word:
filtered_words.append(word)
else:
# Check if between two letters
idx = word.find("'")
if idx != -1:
# Check if apostrophe occurs between two letters (consider valid if so)
if idx + 1 < len(word) and idx != 0:
filtered_words.append(word)
else:
filtered_words.append(word.strip().replace("'", ""))
else:
filtered_words.append(word)
filtered_lines.append(" ".join(filtered_words))
print("Writing output file")
with open(file_name + ".filtered", "w") as f:
for line in filtered_lines:
f.write(line)
f.write("\n")
def run():
# Can be parallelized with a thread pool
# pool = ThreadPool()
# pool.map(run_for_id, [os.path.join(sys.argv[1], f) for f in os.listdir(sys.argv[1])])
run_for_id(os.path.join(sys.argv[1], sys.argv[2]))
if __name__ == "__main__":
run()
| 2,248 | 27.833333 | 91 |
py
|
wav2letter
|
wav2letter-main/recipes/self_training/librispeech/lm/sentence_ify.py
|
import nltk
import tqdm
def load():
with open("lmtext_no_am.txt", "r") as fid:
lines = [l.strip() for l in fid]
return lines
if __name__ == "__main__":
lines = load()
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
step = 10000
with open("lmtext_sentences_no_am.txt", "w") as fid:
for i in tqdm.tqdm(range(0, len(lines), step)):
sentences = tokenizer.tokenize(" ".join(lines[i : i + step]))
for l in sentences:
fid.write(l)
fid.write("\n")

wav2letter-main/recipes/self_training/librispeech/lm/utils.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sox
def findtranscriptfiles(dir):
files = []
for dirpath, _, filenames in os.walk(dir):
for filename in filenames:
if filename.endswith(".trans.txt"):
files.append(os.path.join(dirpath, filename))
return files
def parse_speakers_gender(spk_file):
ret = {}
with open(spk_file, "r") as f:
for line in f:
if line.startswith(";"):
continue
id, gen, _ = line.split("|", 2)
ret[id.strip()] = gen.strip()
return ret
def transcript_to_list(file):
audio_path = os.path.dirname(file)
ret = []
with open(file, "r") as f:
for line in f:
file_id, trans = line.strip().split(" ", 1)
audio_file = os.path.abspath(os.path.join(audio_path, file_id + ".flac"))
            duration = sox.file_info.duration(audio_file) * 1000  # milliseconds
ret.append([file_id, audio_file, str(duration), trans.lower()])
return ret
def read_list(src, files):
ret = []
for file in files:
with open(os.path.join(src, file + ".lst"), "r") as f:
for line in f:
_, _, _, trans = line.strip().split(" ", 3)
ret.append(trans)
return ret

wav2letter-main/recipes/self_training/librispeech/lm/prepare_seq2seq_dict.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to prepare dictionary for running experiments with Librispeech datasets in
wav2letter++ pipelines
Please run prepare_data.py first to generate all the required file lists.
Please make sure sentencepiece (https://github.com/google/sentencepiece) is installed.
Command : python3 prepare_seq2seq_dict.py --src [...] --dst [...]
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import sys
import sentencepiece as spm
import utils
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Librispeech dictionary creation.")
parser.add_argument("--src", help="source directory (where *.lst files are)")
parser.add_argument("--dst", help="destination directory", default="./librispeech")
args = parser.parse_args()
filelists = {
"train": [
"train-clean-100",
# "train-clean-360",
# "train-other-500"
],
"dev": ["dev-clean", "dev-other"],
}
num_wordpieces = 5000
nbest = 10
prefix = "librispeech-train-all-unigram-{}".format(num_wordpieces)
prefix = os.path.join(args.dst, prefix)
textfile = os.path.join(args.dst, "train-all.text")
model = prefix + ".model"
vocab = prefix + ".vocab"
# prepare data
sys.stdout.write("preparing data...\n")
sys.stdout.flush()
train_text = utils.read_list(args.src, filelists["train"])
dev_text = utils.read_list(args.src, filelists["dev"])
with open(textfile, "w") as f:
for line in train_text:
f.write(line)
f.write("\n")
word_dict = set()
for line in train_text + dev_text:
words = line.split()
for w in words:
word_dict.add(w)
word_dict = sorted(word_dict)
# train
sys.stdout.write("computing word pieces...\n")
sys.stdout.flush()
train_cmd = "--input={input} --model_prefix={prefix} --vocab_size={sz} ".format(
input=textfile, prefix=prefix, sz=num_wordpieces
)
train_cmd = (
train_cmd
+ "--character_coverage=1.0 --model_type=unigram --split_by_unicode_script=false"
)
spm.SentencePieceTrainer.Train(train_cmd)
# word piece dictionary
sys.stdout.write("creating word piece list...\n")
exclude_list = {"<unk>", "<s>", "</s>"}
with open(vocab + "-filtered", "w") as o:
with open(vocab, "r") as f:
for line in f:
v, _ = line.strip().split("\t", 1)
if v not in exclude_list:
o.write(v.replace("\u2581", "_"))
o.write("\n")
# word -> word piece lexicon for loading targets
sys.stdout.write("creating word -> word pieces lexicon...\n")
sys.stdout.flush()
sp = spm.SentencePieceProcessor()
sp.Load(model)
outfile = "librispeech-train+dev-unigram-{sz}-nbest{n}.dict".format(
sz=num_wordpieces, n=nbest
)
with open(os.path.join(args.dst, outfile), "w") as f:
for word in word_dict:
wps = sp.NBestEncodeAsPieces(word, nbest)
for wp in wps:
f.write(word)
for w in wp:
f.write(" " + w.replace("\u2581", "_"))
f.write("\n")
sys.stdout.write("Done !\n")

wav2letter-main/recipes/self_training/librispeech/lm/generate_lm_raw_text.py

import glob
import os
def get_am_bookids():
ambooks_file = "LibriSpeech/BOOKS.TXT"
with open(ambooks_file, "r") as fid:
bookids = [l.split()[0] for l in fid]
clean_bookids = []
for bid in bookids:
try:
int(bid)
clean_bookids.append(bid)
        except ValueError:
            # non-numeric id (e.g. a header line): skip it
            pass
return clean_bookids
def load_lm_books():
lmbook_pattern = "librispeech-lm-corpus/corpus/*/*.txt"
return glob.glob(lmbook_pattern)
def remove_am_books_from_lm(am_book_ids, lm_books):
am_book_ids = set(am_book_ids)
all_lm_books = []
clean_lm_books = []
for lmb in lm_books:
lm_b_id = os.path.splitext(os.path.basename(lmb))[0]
all_lm_books.append(lm_b_id)
if lm_b_id not in am_book_ids:
clean_lm_books.append(lmb)
all_lm_books = set(all_lm_books)
for a_id in am_book_ids:
if a_id not in all_lm_books:
pass
# print(a_id)
return clean_lm_books
def write_lm_books_to_file(lm_books):
lmfile = "lmtext_no_am.txt"
with open(lmfile, "w") as fid:
for lmb in lm_books:
with open(lmb, "r") as f:
for line in f:
if line.strip() != "":
fid.write(line.lower())
if __name__ == "__main__":
am_book_ids = get_am_bookids()
lm_books = load_lm_books()
clean_lm_books = remove_am_books_from_lm(am_book_ids, lm_books)
print(
"Removed {} am books from {} lm books. Left with {} lm books".format(
len(am_book_ids), len(lm_books), len(clean_lm_books)
)
)
# write_lm_books_to_file(lm_books)

wav2letter-main/recipes/self_training/librispeech/lm/clean_lm_text.py

import re
from multiprocessing import Pool
import nltk
import tqdm
PUNCTUATION = set(list(",'\"?!#&(){}[]*+=;:.-"))
PUNCTUATION.add("")
def clean(line):
# try:
# new_line = normalise.normalise(line, verbose=False)
# except:
# print("Could not normalize:", line)
# new_line = (t for t in new_line if t not in PUNCTUATION)
# new_line = " ".join(new_line).lower()
new_line = re.sub('[,"?!#&\(\)\{\}\[\]\*+=;:..]', "", line)
new_line = re.sub("-", " ", new_line)
return " ".join(new_line.split()).lower()
def write(lines, fid):
for line in lines:
if line:
fid.write(line)
fid.write("\n")
def load():
with open("lmtext_sentences_no_am.txt.filtered", "r") as fid:
lines = [l for l in fid]
return lines
if __name__ == "__main__":
lines = load()
fid = open("lmtext_clean_no_am.txt", "w")
clean_lines = []
step = 1000000
for i in range(0, len(lines), step):
print("Cleaning lines {} - {}".format(i, i + step))
pool = Pool()
clean_lines = pool.map(clean, lines[i : i + step])
pool.close()
pool.join()
write(clean_lines, fid)
fid.close()

wav2letter-main/recipes/self_training/pseudo_labeling/dataset_utils.py

from __future__ import absolute_import, division, print_function, unicode_literals
class Transcript(object):
def __init__(self, sid, path, duration, transcript=""):
self.sid = sid
self.path = path
self.duration = duration
self.transcript = transcript
def __hash__(self):
return hash(self.sid)
def __eq__(self, other):
return self.sid == other.sid
def create_transcript_dict_from_listfile(listpath):
res = {}
with open(listpath, "r") as f:
while True:
line = f.readline()
if not line:
break
els = line.split()
sid = els[0]
res[sid] = Transcript(sid, els[1], els[2], " ".join(els[3:]))
return res
listfileformat = """{sid} {path} {duration} {transcript}\n"""
def write_transcript_list_to_file(transcript_dict, outfilename):
with open(outfilename, "w") as f:
for sid in transcript_dict.keys():
transcript = transcript_dict[sid]
f.write(
listfileformat.format(
sid=transcript.sid,
path=transcript.path,
duration=transcript.duration,
transcript=transcript.transcript.rstrip(),
)
)
def zip_datasets(first, second):
"""
    Zips together two list dataset representations. Preserves the sample ID of
    each sample and raises if the same ID appears in both datasets.
"""
output = first
for sample_key in second.keys():
sample = second[sample_key]
if sample.sid in output.keys():
raise Exception("Attempted to write duplicate sample ID: " + sample.sid)
output[sample.sid] = sample
return output
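# Usage sketch (hypothetical shard files): merge two list datasets while
# refusing duplicate sample IDs.
#   merged = zip_datasets(
#       create_transcript_dict_from_listfile("shard0.lst"),
#       create_transcript_dict_from_listfile("shard1.lst"),
#   )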

wav2letter-main/recipes/self_training/pseudo_labeling/generate_synthetic_data.py

from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import sys
from dataset_utils import (
create_transcript_dict_from_listfile,
write_transcript_list_to_file,
)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def pair_transcripts_with_existing_list(transcript_list, listpath):
transcripts = create_transcript_dict_from_listfile(listpath)
merged = {}
for pred in transcript_list:
merged[pred.sid] = transcripts[pred.sid]
merged[pred.sid].transcript = pred.prediction
# remove transcripts for which we don't have a prediction (those that were removed)
return merged
def compute_ngrams(inp, size):
return [inp[i : i + size] for i in range(len(inp) - (size - 1))]
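# Sketch: sliding-window n-grams over a token list.
#   compute_ngrams(["a", "b", "c"], 2) == [["a", "b"], ["b", "c"]]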
def filter_transcripts(transcript_list, args):
# fastpath
if not args.filter:
return transcript_list
filtered_transcripts = []
for transcript in transcript_list:
good = True
# skip transcripts with warnings
if args.warnings:
if transcript.warning:
good = False
if args.print_filtered_results:
eprint(
"Filtering predicted transcript (warning) "
+ transcript.sid
+ ": "
+ transcript.prediction
)
continue
if args.ngram:
plist = transcript.prediction.split(" ")
# look for repeating n-grams
ngrams = [" ".join(c) for c in compute_ngrams(plist, args.ngram_size)]
for gram in ngrams:
if transcript.prediction.count(gram) > args.ngram_appearance_threshold:
good = False
if args.print_filtered_results:
eprint(
"Filtering predicted transcript (ngram fail) "
+ transcript.sid
+ ": "
+ transcript.prediction
)
break
# passes all checks
if good:
filtered_transcripts.append(transcript)
return filtered_transcripts
class TranscriptPrediction(object):
def __init__(self, sid, prediction, transcript, warning=False):
self.sid = sid
self.prediction = prediction
self.transcript = transcript
self.warning = warning
def create_transcript_set(inpath, viterbi=False, distributed_decoding=False):
with open(inpath, "r") as f:
if not distributed_decoding:
# first line is chronos job
f.readline()
predictions = []
while True:
# each glob has
# - actual transcript
# - predicted transcript
# - actual word pieces
# - predicted word pieces
transcript = f.readline()
# check if EOF
if not transcript:
break
# each set is four lines, unless there's a warning
warning = False
if "[WARNING]" in transcript:
transcript = f.readline() # read an extra line to compensate
warning = True
transcript = transcript[
transcript.find("|T|: ") + len("|T|: ") :
] # remove |T|:
predicted = f.readline() # predicted transcript
predicted = predicted[
predicted.find("|P|: ") + len("|P|: ") :
] # remove |P|:
if viterbi:
predicted = predicted.replace(" ", "").replace("_", " ")
transcript = transcript.replace(" ", "").replace("_", " ")
# if distributed_decoding:
# predicted = predicted[1:].replace("_", " ")
# if not viterbi:
# read wp
f.readline()
f.readline()
sample_info = f.readline()
if not sample_info.strip():
continue
sid = sample_info.split(" ")[1]
sid = sid[:-1]
predictions.append(
TranscriptPrediction(sid, predicted, transcript, warning)
)
return predictions
def run():
parser = argparse.ArgumentParser(
description="Converts decoder output into train-ready list-style"
" dataset formats"
)
parser.add_argument(
"-i",
"--input",
type=str,
required=True,
help="Path to decoder output containing transcripts",
)
parser.add_argument(
"-p",
"--listpath",
type=str,
required=True,
help="Path of existing list file dataset or which to replace transcripts",
)
parser.add_argument(
"-w",
"--warnings",
action="store_true",
help="Remove transcripts with EOS warnings by default",
)
parser.add_argument(
"-g",
"--ngram",
action="store_true",
help="Remove transcripts with ngram issues",
)
parser.add_argument(
"-n",
"--ngram_appearance_threshold",
type=int,
required=False,
default=4,
help="The number of identical n-grams that must appear in a "
"prediction for it to be thrown out",
)
parser.add_argument(
"-s",
"--ngram_size",
type=int,
required=False,
default=2,
help="The size of n-gram which will be used when searching for duplicates",
)
parser.add_argument(
"-f", "--filter", action="store_true", help="Run some filtering criteria"
)
parser.add_argument(
"-o", "--output", type=str, required=True, help="Output filepath"
)
parser.add_argument(
"-d",
"--distributed_decoding",
action="store_true",
help="Processing a combined transcript with distributed decoding",
)
parser.add_argument(
"-v",
"--print_filtered_results",
        action="store_true",
help="Print transcripts that are filtered based on filter criteria to stderr",
)
parser.add_argument(
"-q",
"--viterbi",
action="store_true",
help="Expects a transcript format that is consistent with a Viterbi run",
)
args = parser.parse_args()
if not os.path.isfile(args.input):
raise Exception("'" + args.input + "' - input file doesn't exist")
if not os.path.isfile(args.listpath):
raise Exception("'" + args.input + "' - listpath file doesn't exist")
transcripts_predictions = create_transcript_set(
args.input, args.viterbi, args.distributed_decoding
)
filtered_transcripts = filter_transcripts(transcripts_predictions, args)
final_transcript_dict = pair_transcripts_with_existing_list(
filtered_transcripts, args.listpath
)
write_transcript_list_to_file(final_transcript_dict, args.output)
if __name__ == "__main__":
run()

wav2letter-main/recipes/self_training/pseudo_labeling/generate_synthetic_lexicon.py

from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import operator
import os
from synthetic_lexicon_utils import (
LexiconEntry,
read_spellings_from_file,
write_spellings_to_file,
)
def generate_wp_spelling(wp_list):
    # group a flat stream of word pieces into per-word spellings;
    # a piece containing "_" starts a new word
    spellings = []
    this_spelling = []
    for wp in wp_list:
        if "_" not in wp:
            this_spelling.append(wp)
        else:
            if len(this_spelling) > 0:
                spellings.append(this_spelling)
            this_spelling = [wp]
    if len(this_spelling) > 0:
        spellings.append(this_spelling)
    return spellings
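# Sketch (made-up pieces): "_" marks a word-initial piece, so the flat stream
# is regrouped into one spelling per word.
#   generate_wp_spelling(["_he", "llo", "_we"]) == [["_he", "llo"], ["_we"]]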
def generate(infile):
# maps word --> dict mapping wp spellings to the number of
# times that spelling appears
lexicon = {}
with open(infile, "r") as f:
prediction = None
wp_spelling_raw = None
for line in f:
if "|P|" in line:
# format is "|P|: _[wp]..."
prediction = (
line[line.find("|P|: ") + len("|P|: ") :]
.replace(" ", "")
.replace("_", " ")
)
continue
elif "|p|" in line:
wp_spelling_raw = line[line.find("|p|:") + len("|p|: ") :]
elif "|T|" in line:
continue
elif "|t|" in line:
continue
elif "sample" in line:
continue
elif "WARNING" in line:
continue
elif "CHRONOS" in line:
continue
elif "---" in line:
continue
else:
raise Exception("Format invalid; extraneous line: " + line)
transcription = prediction.strip().split(" ")
wp_spelling = [e.strip() for e in wp_spelling_raw.strip().split(" ") if e]
            wp_spelling = generate_wp_spelling(wp_spelling)
for transcription_word, wp_spelling_word in zip(transcription, wp_spelling):
wp_key = " ".join(wp_spelling_word)
if transcription_word not in lexicon:
lexicon[transcription_word] = {}
if wp_key not in lexicon[transcription_word]:
lexicon[transcription_word][wp_key] = 0
lexicon[transcription_word][wp_key] += 1
return lexicon
def order_lexicon(lexicon):
spellings = {} # maps a transcription word to its spellings, in order
for transcription_word in lexicon.keys():
spellings[transcription_word] = []
for spelling, _freq in sorted(
lexicon[transcription_word].items(),
key=operator.itemgetter(1),
reverse=True,
):
spellings[transcription_word].append(spelling.split(" "))
return spellings
def create_spellings(spellings):
entries = {}
sorted_keys = sorted(spellings.keys())
for word in sorted_keys:
for spelling in spellings[word]:
if word not in entries:
entries[word] = LexiconEntry(word, [])
entries[word].add_spelling(spelling)
return entries
def run():
parser = argparse.ArgumentParser(
description="Converts decoder output into train-ready lexicon format"
)
parser.add_argument(
"-i",
"--inputhyp",
type=str,
required=True,
help="Path to decoder output using --usewordpiece=false file",
)
parser.add_argument(
"-l",
"--inputlexicon",
type=str,
required=True,
help="Path to the existing lexicon with which to merge a lexicon from the hyp",
)
parser.add_argument(
"-o", "--output", type=str, required=True, help="Path to output lexicon file"
)
args = parser.parse_args()
if not os.path.isfile(args.inputhyp):
raise Exception("'" + args.inputhyp + "' - input file doesn't exist")
if not os.path.isfile(args.inputlexicon):
raise Exception("'" + args.inputlexicon + "' - input file doesn't exist")
lexicon = generate(args.inputhyp)
sorted_spellings = order_lexicon(lexicon)
spellings = create_spellings(sorted_spellings)
new_lexicon = []
for key in sorted(spellings.keys()):
new_lexicon.append(spellings[key])
old_lexicon_spellings = read_spellings_from_file(args.inputlexicon)
old = {}
for entry in old_lexicon_spellings:
old[entry.word] = entry
count = 0
for entry in new_lexicon:
count += 1
if count % 1000 == 0:
print("Processed " + str(count) + " entries in new lexicon.")
        if entry.word in old:
            # entry already in lexicon: append only the spellings we don't have yet
            for spelling in entry.sorted_spellings:
                if spelling not in old[entry.word].sorted_spellings:
                    old[entry.word].sorted_spellings.append(spelling)
else:
# OOV case: create a new lexicon entry with these spellings
old[entry.word] = entry
final = []
# sort the final spellings
for key in sorted(old.keys()):
final.append(old[key])
write_spellings_to_file(final, args.output)
if __name__ == "__main__":
run()

wav2letter-main/recipes/self_training/pseudo_labeling/combine_synthetic_lexicons.py

from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
from synthetic_lexicon_utils import (
read_spellings_from_file,
write_spellings_to_file,
)
def combine_lexicons(lexicon1, lexicon2):
combined = {}
for lexicon in [lexicon1, lexicon2]:
for entry in lexicon:
key = entry.word
if key in combined:
combined[key].combine_entries(entry)
else:
combined[key] = entry
combined_list = []
for key in sorted(combined.keys()):
combined_list.append(combined[key])
return combined_list
def run():
parser = argparse.ArgumentParser(description="Joins two lexicons")
parser.add_argument(
"-l1", "--lexicon1", type=str, required=True, help="Path to lexicon 1"
)
parser.add_argument(
"-l2", "--lexicon2", type=str, required=True, help="Path to lexicon 2"
)
parser.add_argument(
"-o", "--output", type=str, required=True, help="Path to output lexicon file"
)
args = parser.parse_args()
if not os.path.isfile(args.lexicon1):
raise Exception("'" + args.lexicon1 + "' - input file doesn't exist")
if not os.path.isfile(args.lexicon2):
raise Exception("'" + args.lexicon2 + "' - input file doesn't exist")
lex1 = read_spellings_from_file(args.lexicon1)
lex2 = read_spellings_from_file(args.lexicon2)
combined = combine_lexicons(lex1, lex2)
write_spellings_to_file(combined, args.output)
if __name__ == "__main__":
run()

wav2letter-main/recipes/self_training/pseudo_labeling/synthetic_lexicon_utils.py

from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
class LexiconEntry(object):
"""
    A particular word in the Lexicon and its candidate spellings, sorted by
    descending frequency.
"""
def __init__(self, word, sorted_spellings):
self.word = word
self.sorted_spellings = sorted_spellings
def add_spelling(self, spelling):
self.sorted_spellings.append(spelling)
def combine_entries(self, other):
# Zip up sorted spellings
new_spellings = []
for this, that in itertools.zip_longest(
self.sorted_spellings, other.sorted_spellings
):
if this == that:
new_spellings.append(this)
else:
if this:
new_spellings.append(this)
if that:
new_spellings.append(that)
self.sorted_spellings = new_spellings
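# Sketch (hypothetical spellings): entries are interleaved rank by rank, so the
# top-ranked spelling from each source lexicon stays near the front.
#   a = LexiconEntry("cat", [["_c", "at"]])
#   b = LexiconEntry("cat", [["_ca", "t"], ["_c", "a", "t"]])
#   a.combine_entries(b)  # -> [["_c", "at"], ["_ca", "t"], ["_c", "a", "t"]]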
def write_spellings_to_file(spellings, outfile):
"""
Writes an array of Spellings to a file in Lexicon format
"""
sorted_spellings = sorted(spellings, key=lambda spelling: spelling.word)
with open(outfile, "w") as o:
for entry in sorted_spellings:
for spelling in entry.sorted_spellings:
o.write(entry.word.strip() + " " + " ".join(spelling).strip())
o.write("\n")
def read_spellings_from_file(infile):
spellings = {} # maps string to LexiconEntry
with open(infile, "r") as infile:
for line in infile:
s_idx = line.find(" ")
word = line[0:s_idx].strip()
spelling = line[s_idx + 1 :].strip().split(" ")
if word not in spellings:
spellings[word] = LexiconEntry(word, [])
spellings[word].add_spelling(spelling)
out = []
for key in sorted(spellings.keys()):
out.append(spellings[key])
return out

wav2letter-main/recipes/sota/2019/raw_lm_corpus/get_gb_books_by_id.py

import argparse
import os
import sys
from multiprocessing.pool import ThreadPool
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def get_one_book(book_id, outdir):
eprint("Getting book with id", book_id)
text = strip_headers(load_etext(book_id)).strip()
newpath = os.path.join(outdir, str(book_id) + ".body.txt")
with open(newpath, "w") as outfile:
outfile.write(text)
def main():
parser = argparse.ArgumentParser("Grabs Gutenberg books by ID from a file")
parser.add_argument("--idfile", type=str, required=True)
parser.add_argument("--outdir", type=str, required=True)
args = parser.parse_args()
if not os.path.exists(args.idfile):
raise RuntimeError("idfile not found")
with open(args.idfile, "r") as infile:
ids = [(int(line.strip()), args.outdir) for line in infile]
pool = ThreadPool(80)
pool.starmap(get_one_book, ids)
if __name__ == "__main__":
main()

wav2letter-main/recipes/sota/2019/raw_lm_corpus/filter_distances.py

import argparse
import os
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def run(filename, score, distance_ratio):
eprint("Starting with filename ", filename)
with open(filename, "r") as f:
done = 0
for line in f:
done += 1
str1, str2, scoreRaw = line.split("|")
distance = float(scoreRaw)
len1 = len(str1.split())
len2 = len(str2.split())
maxlen = max(len1, len2)
minlen = min(len1, len2)
if (
maxlen - minlen
) / minlen < distance_ratio and distance <= score * maxlen:
print("{s1}|{s2}|{d}".format(s1=str1, s2=str2, d=scoreRaw.strip()))
if done % 1000000 == 0:
eprint(done)
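# Worked check (made-up pair): for "a b c"|"a b" with --score 0.2 and
# --distance_ratio 0.6, maxlen=3 and minlen=2, so (3 - 2) / 2 = 0.5 < 0.6 and
# the pair is printed only if its raw distance is <= 0.2 * 3 = 0.6.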
if __name__ == "__main__":
parser = argparse.ArgumentParser("Filters levenshtein scored title pairs")
parser.add_argument("--infile", type=str, required=True)
parser.add_argument("--score", type=float, required=True)
parser.add_argument("--distance_ratio", type=float, required=True)
args = parser.parse_args()
if not os.path.exists(args.infile):
raise ValueError("infile not found")
run(args.infile, args.score, args.distance_ratio)

wav2letter-main/recipes/sota/2019/raw_lm_corpus/process_raw_text.py

import argparse
import os
from multiprocessing.pool import ThreadPool
from gutenberg.cleanup import strip_headers
def strip_header(name):
print(name)
with open(name, "r") as intext:
        buf = intext.read()  # strip_headers expects text; bytes would also break the text-mode write below
return strip_headers(buf).strip()
def extract_one_book(book_path):
content = strip_header(book_path)
newname = os.path.splitext(book_path)[0] + ".body.txt"
with open(newname, "w") as outfile:
outfile.write(content)
def main():
parser = argparse.ArgumentParser("Processes input Gutenberg text")
parser.add_argument("--indir", type=str, required=True)
args = parser.parse_args()
if not os.path.exists(args.indir):
raise RuntimeError("indir not found")
books = [os.path.join(args.indir, f) for f in os.listdir(args.indir)]
pool = ThreadPool(1)
pool.map(extract_one_book, books)
if __name__ == "__main__":
main()

wav2letter-main/recipes/sota/2019/raw_lm_corpus/get_titles.py

import argparse
import os
import xml.etree.ElementTree as ET
from multiprocessing.pool import ThreadPool
from gutenberg.query import get_metadata
CACHE_PATH = ""
def get_one_title_from_cache(book_id):
return (book_id, get_metadata("title", int(book_id)))
def get_one_title(book_id):
try:
title = (
ET.parse("{c}/{bid}/pg{bid}.rdf".format(c=CACHE_PATH, bid=str(book_id)))
.getroot()
.find(
"./{http://www.gutenberg.org/2009/pgterms/}ebook/{http://purl.org/dc/terms/}title"
)
.text.replace("\n", " ")
.replace("\r", "")
)
return (book_id, title)
except AttributeError:
print("Could not get title for book with id", book_id)
return (book_id, "---DELETE-NO_TITLE")
def main():
parser = argparse.ArgumentParser(
"Gets title metadata given a collection of book ids"
)
parser.add_argument("--infile", type=str, required=True)
parser.add_argument("--outfile", type=str, required=True)
parser.add_argument("--cachepath", type=str, required=True)
args = parser.parse_args()
if not os.path.exists(args.infile):
raise ValueError("indir not found")
if not os.path.exists(args.cachepath):
raise ValueError("cachepath not found")
global CACHE_PATH
CACHE_PATH = args.cachepath
book_ids = []
with open(args.infile, "r") as f:
book_ids = [line.rstrip() for line in f.readlines()]
print("Starting thread pool")
pool = ThreadPool(80)
id_title_tuples = pool.map(get_one_title, book_ids)
print("Metadata acquisition complete")
with open(args.outfile, "w") as o:
for bid, title in id_title_tuples:
o.write("{b}|{t}\n".format(b=bid, t=title))
if __name__ == "__main__":
main()

wav2letter-main/recipes/sota/2019/raw_lm_corpus/join_ids.py

import argparse
import os
import sys
def run(basefile, tablefile, separator):
# Read IDs
with open(basefile, "r") as f:
titles = [line.strip() for line in f]
# Make table
with open(tablefile, "r") as f:
table = {}
for line in f:
book_id, book_title = line.strip().split(separator)
table[book_title] = book_id
# Lookup
for key in titles:
if key in table:
sys.stdout.write("{key}|{val}\n".format(key=key, val=table[key]))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Join IDs from a table")
parser.add_argument("--basefile", type=str, required=True)
parser.add_argument("--tablefile", type=str, required=True)
parser.add_argument("--separator", type=str, required=True)
args = parser.parse_args()
if not os.path.exists(args.basefile):
raise ValueError("basefile not found")
if not os.path.exists(args.tablefile):
raise ValueError("tablefile not found")
run(args.basefile, args.tablefile, args.separator)

wav2letter-main/recipes/sota/2019/lm_analysis/filter_segmentations.py

import sys
from collections import defaultdict
def count(MIN_SIL_LENGTH, align_file):
lines = []
with open(align_file) as fin:
lines = fin.readlines()
res = {}
res["word_counter"] = [0] * 100 # number of word in each small chunk
res["chunk_counter"] = [0] * 100 # number of small chunk per audio
stat = defaultdict(list)
good_samples = []
for line in lines:
sp = line.split("\t")
# filename = sp[0]
alignments = sp[1].strip().split("\\n")
# Parse the alignments
chunk_starts = [0]
chunk_ends = []
words = []
cur_words = []
cur_end = 0
for i, alignment in enumerate(alignments):
sp = alignment.split()
begin = float(sp[2])
length = float(sp[3])
word = sp[4]
cur_end = begin + length
if i == 0:
continue
if word == "$":
if length > MIN_SIL_LENGTH:
chunk_ends.append(cur_end)
chunk_starts.append(cur_end)
words.append(" ".join(cur_words))
cur_words = []
continue
cur_words.append(word)
if len(cur_words) > 0:
chunk_ends.append(cur_end)
words.append(" ".join(cur_words))
else:
chunk_starts.pop()
# res
good = True
n_chunk = len(words)
# filter if n_segments == 1
if n_chunk < 2:
good = False
res["chunk_counter"][n_chunk] += 1
for word_chunk in words:
n_word = len(word_chunk.split())
res["word_counter"][n_word] += 1
stat[n_chunk].append(n_word)
# filter if number of words in a segment > 6
if n_word > 6:
good = False
if good:
good_samples.append(line)
print(len(good_samples))
return res, stat, good_samples
if __name__ == "__main__":
align_file = sys.argv[1]
original_file = sys.argv[2]
res, data, samples = count(0.13, align_file)
print(res)
fnames = set([line.strip().split("\t")[0].split("/")[-1] for line in samples])
# prepare original filtered file
with open(original_file, "r") as f, open(
"original.filtered_chunk_g1_ngrams_le6.lst", "w"
) as fout:
for line in f:
if line.split(" ")[1].split("/")[-1] in fnames:
fout.write(line)
with open(align_file + ".filtered_chunk_g1_ngrams_le6", "w") as f:
for sample in samples:
f.write(sample)

wav2letter-main/recipes/sota/2019/lm_analysis/tts_forward.py

# https://github.com/mozilla/TTS/blob/master/notebooks/Benchmark.ipynb - original code which we adapted
import io
import os
import sys
import time
from collections import OrderedDict
import numpy as np
import torch
from localimport import localimport
from matplotlib import pylab as plt
sys.path.append("TTS")  # make the cloned TTS / WaveRNN checkouts importable
sys.path.append("WaveRNN")  # (must happen before the TTS imports below)
from TTS.layers import *
from TTS.models.tacotron import Tacotron
from TTS.utils.audio import AudioProcessor
from TTS.utils.data import *
from TTS.utils.generic_utils import load_config, setup_model
from TTS.utils.synthesis import synthesis
from TTS.utils.text import text_to_sequence
from TTS.utils.text.symbols import phonemes, symbols
tts_pretrained_model_config = "tts_models/config.json"
wavernn_pretrained_model_config = "wavernn_models/config.json"
wavernn_pretrained_model = "wavernn_models/checkpoint_433000.pth.tar"
tts_pretrained_model = "tts_models/checkpoint_261000.pth.tar"
def tts(model, text, CONFIG, use_cuda, ap, use_gl, speaker_id=None):
t_1 = time.time()
waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens = synthesis(
model,
text,
CONFIG,
use_cuda,
ap,
truncated=True,
enable_eos_bos_chars=CONFIG.enable_eos_bos_chars,
)
if CONFIG.model == "Tacotron" and not use_gl:
mel_postnet_spec = ap.out_linear_to_mel(mel_postnet_spec.T).T
if not use_gl:
waveform = wavernn.generate(
torch.FloatTensor(mel_postnet_spec.T).unsqueeze(0).cuda(),
batched=batched_wavernn,
target=11000,
overlap=550,
)
print(" > Run-time: {}".format(time.time() - t_1))
return alignment, mel_postnet_spec, stop_tokens, waveform
use_cuda = True
batched_wavernn = True
# initialize TTS
CONFIG = load_config(tts_pretrained_model_config)
print(CONFIG)
# load the model
num_chars = len(phonemes) if CONFIG.use_phonemes else len(symbols)
model = setup_model(num_chars, CONFIG)
# load the audio processor
ap = AudioProcessor(**CONFIG.audio)
# load model state
if use_cuda:
cp = torch.load(tts_pretrained_model)
else:
cp = torch.load(tts_pretrained_model, map_location=lambda storage, loc: storage)
# load the model
model.load_state_dict(cp["model"])
if use_cuda:
model.cuda()
model.eval()
print(cp["step"])
model.decoder.max_decoder_steps = 2000
# initialize WaveRNN
VOCODER_CONFIG = load_config(wavernn_pretrained_model_config)
with localimport("/content/WaveRNN") as _importer:
from models.wavernn import Model
bits = 10
wavernn = Model(
rnn_dims=512,
fc_dims=512,
mode="mold",
pad=2,
upsample_factors=VOCODER_CONFIG.upsample_factors, # set this depending on dataset
feat_dims=VOCODER_CONFIG.audio["num_mels"],
compute_dims=128,
res_out_dims=128,
res_blocks=10,
hop_length=ap.hop_length,
sample_rate=ap.sample_rate,
).cuda()
check = torch.load(wavernn_pretrained_model)
wavernn.load_state_dict(check["model"])
if use_cuda:
wavernn.cuda()
wavernn.eval()
print(check["step"])
def run_tts(transcription, sample_id, name):
_, _, _, wav = tts(
model,
transcription,
CONFIG,
use_cuda,
ap,
speaker_id=0,
        use_gl=False,
)
ap.save_wav(wav, name)
with open(sys.argv[1], "r") as f:
transcriptions = [line.strip() for line in f]
sample_ids = np.arange(len(transcriptions))
names = [sys.argv[2] + str(sample_id) + ".wav" for sample_id in sample_ids]
for index in range(len(transcriptions)):
run_tts(transcriptions[index], sample_ids[index], names[index])

wav2letter-main/recipes/sota/2019/lm_analysis/generate_shuffle_dev_other_tts.py

import os
import sys
import numpy
numpy.random.seed(42)
with open(os.path.join(sys.argv[1], "dev-other.lst"), "r") as f:
data = [line.strip() for line in f]
for n, seed_val in enumerate([0, 2, 3, 4, 5]):
numpy.random.seed(42 + seed_val)
data = numpy.random.permutation(data)
with open("tts_shuffled_{}.txt".format(n), "w") as fout:
for line in data:
line_new = line.split(" ")
new_tr = numpy.random.permutation(line_new[3:])
fout.write(line + "\n")
fout.write("{}\n".format(" ".join(new_tr)))

wav2letter-main/recipes/sota/2019/lm_analysis/shuffle_segments.py

import os
import random
import sys
from multiprocessing import Pool
import sox
align_file = sys.argv[1]
output_dir = sys.argv[2]
lines = []
with open(align_file) as fin:
lines = fin.readlines()
N_THREADS = 40
MIN_SIL_LENGTH = 0.13
TOLERANCE = 0.04
def process(parameters):
tid, n_samples = parameters
output_list = output_dir + "dev-other.{}.lst".format(tid)
with open(output_list, "w") as fout:
for i in range(tid * n_samples, min(len(lines), n_samples * (tid + 1))):
line = lines[i]
sp = line.split("\t")
filename = sp[0]
# print(filename)
# duration = sox.file_info.duration(filename)
alignments = sp[1].strip().split("\\n")
# Parse the alignments
chunk_starts = [0]
chunk_ends = []
words = []
cur_words = []
cur_end = 0
for i, alignment in enumerate(alignments):
sp = alignment.split()
begin = float(sp[2])
length = float(sp[3])
word = sp[4]
cur_end = begin + length
if i == 0:
continue
if word == "$":
if length > MIN_SIL_LENGTH:
chunk_ends.append(cur_end - TOLERANCE)
chunk_starts.append(cur_end - TOLERANCE)
words.append(" ".join(cur_words))
cur_words = []
continue
cur_words.append(word)
if len(cur_words) > 0:
chunk_ends.append(cur_end)
words.append(" ".join(cur_words))
else:
chunk_starts.pop()
# print(duration)
# print(chunk_starts)
# print(chunk_ends)
# print(words)
# Split the audios
order = list(range(len(chunk_starts)))
random.shuffle(order)
new_target = " ".join([words[i] for i in order])
new_audio_path = output_dir + filename.split("/")[-1]
fout.write(
"{}\t{}\t{}\t{}\n".format(
new_audio_path, new_audio_path, chunk_ends[-1] * 1000, new_target
)
)
if len(chunk_starts) == 1:
os.system("cp {} {}".format(filename, output_dir))
continue
paths = []
for i in order:
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(
file_type="flac", encoding="signed-integer", bits=16, rate=16000
)
sox_tfm.trim(chunk_starts[i], chunk_ends[i])
new_path = "/tmp/{}_{}.flac".format(tid, i)
sox_tfm.build(filename, new_path)
paths.append(new_path)
# Combine them
sox_comb = sox.Combiner()
sox_comb.build(list(paths), new_audio_path, "concatenate")
if __name__ == "__main__":
n_sample_per_thread = len(lines) // N_THREADS + 1
print(
"Spreading {} threads with {} samples in each".format(
N_THREADS, n_sample_per_thread
)
)
pool = Pool(N_THREADS)
pool.map(process, zip(list(range(N_THREADS)), [n_sample_per_thread] * N_THREADS))
pool.close()
pool.join()

wav2letter-main/recipes/sota/2019/rescoring/forward_lm.py

from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import numpy
import torch
from fairseq.data import Dictionary
from fairseq.models.fconv_lm import FConvLanguageModel
from fairseq.models.transformer_lm import TransformerLanguageModel
def load_lm(lm_path, model_type, dict_path):
path, checkpoint = os.path.split(lm_path)
if model_type == "convlm":
model_handle = FConvLanguageModel.from_pretrained(
path, checkpoint, os.path.split(dict_path)[0]
)
elif model_type == "transformer":
model_handle = TransformerLanguageModel.from_pretrained(
path, checkpoint, os.path.split(dict_path)[0]
)
else:
raise Exception(
"Unsupported language model type: use 'convlm' or 'transformer' models"
)
model = model_handle.models[0].decoder.cuda()
model.eval()
print(model)
return model
def predict_batch(sentences, model, fairseq_dict, max_len):
encoded_input = []
padded_input = []
ppls = []
total_loss = 0.0
nwords = 0
for sentence in sentences:
encoded_input.append([fairseq_dict.index(token) for token in sentence])
assert (
len(encoded_input[-1]) <= max_len
), "Error in the input length, it should be less than max_len {}".format(
max_len
)
if len(encoded_input[-1]) < max_len:
padded_input.append(
[fairseq_dict.eos()]
+ encoded_input[-1]
+ [fairseq_dict.eos()] * (max_len - len(encoded_input[-1]))
)
else:
padded_input.append([fairseq_dict.eos()] + encoded_input[-1])
x = torch.LongTensor(padded_input).cuda()
with torch.no_grad():
y = model.forward(x)[0]
if model.adaptive_softmax is not None:
logprobs = (
model.adaptive_softmax.get_log_prob(y, None).detach().cpu().numpy()
)
else:
logprobs = torch.nn.functional.log_softmax(y, 2).detach().cpu().numpy()
for index, input_i in enumerate(encoded_input):
loss = numpy.sum(logprobs[index, numpy.arange(len(input_i)), input_i])
loss += logprobs[index, len(input_i), fairseq_dict.eos()]
ppls.append(loss)
total_loss += loss
nwords += len(input_i) + 1
return ppls, total_loss, nwords
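# Scoring sketch (hypothetical 2-token sentence ["he", "llo"]): the input is
# framed as <eos> he llo, and
#   loss = logP(he | <eos>) + logP(llo | <eos> he) + logP(<eos> | <eos> he llo)
# so each entry of ppls is a sentence log-likelihood; the corpus perplexity
# printed at the end is exp(-total_loss / nwords).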
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Running forward pass for language model"
)
parser.add_argument("--model", required=True, type=str, help="path to the model")
parser.add_argument(
"--dict", required=True, type=str, help="path to the dict of the model"
)
parser.add_argument(
"--max-tokens",
required=True,
type=int,
default=1024,
help="max tokens in the batch",
)
parser.add_argument(
"--text", required=True, type=str, help="path to text to be evaluated"
)
parser.add_argument(
"--out", type=str, default="out.txt", help="path to text to be saved"
)
parser.add_argument(
"--skip",
        action="store_true",
help="skip <sampleID> <decoder score> <AM score> tokens",
)
parser.add_argument(
"--model-type",
required=True,
type=str,
help="Language model type, supported values 'convlm' and 'transformer'",
)
args = parser.parse_args()
fairseq_dict = Dictionary.load(args.dict)
model = load_lm(args.model, args.model_type, args.dict)
total_loss = 0.0
nwords = 0.0
batch = []
original_lines = []
max_len = 0
with open(args.text, "r") as ftext, open(args.out, "w") as fout:
for line in ftext:
# id | decoder score | am score | lm score | wer | transcription
line_parsed = line.rstrip().split("|")
sentence = line_parsed[-1].strip().split(" ")
if (len(batch) + 1) * numpy.maximum(
max_len, len(sentence)
) > args.max_tokens:
if len(batch) == 0:
if args.skip:
original_lines.append(line_parsed[0].strip().split(" ")[0])
batch.append(sentence)
max_len = len(sentence)
continue
ppls, loss_batch, nwords_batch = predict_batch(
batch, model, fairseq_dict, max_len
)
total_loss += loss_batch
nwords += nwords_batch
for index in range(len(batch)):
if args.skip:
fout.write(original_lines[index] + " {}\n".format(ppls[index]))
else:
fout.write("{}\n".format(ppls[index]))
batch = [sentence]
if args.skip:
original_lines = [line_parsed[0].strip().split(" ")[0]]
max_len = len(sentence)
else:
batch.append(sentence)
if args.skip:
original_lines.append(line_parsed[0].strip().split(" ")[0])
max_len = numpy.maximum(max_len, len(sentence))
if len(batch) > 0:
ppls, loss_batch, nwords_batch = predict_batch(
batch, model, fairseq_dict, max_len
)
total_loss += loss_batch
nwords += nwords_batch
for index in range(len(batch)):
if args.skip:
fout.write(original_lines[index] + " {}\n".format(ppls[index]))
else:
fout.write("{}\n".format(ppls[index]))
print("Total PPL", numpy.exp(-total_loss / nwords))

wav2letter-main/recipes/sota/2019/rescoring/rescore.py

import argparse
from collections import defaultdict
from multiprocessing import Pool
import numpy as np
TOP_K = [1]
ref_dict = {}
res_dict = defaultdict(list)
def score(x, wts):
return (
x["am_score"]
+ wts["tr"] * x["tr"]
+ wts["convlm"] * x["convlm"]
+ wts["len"] * x["wl_len"]
)
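# Rescoring sketch: the final score is the log-linear combination
#   am_score + wts["tr"] * transformer_score + wts["convlm"] * convlm_score
#            + wts["len"] * wl_len
# where wl_len counts words plus characters as a crude length reward.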
def compute(wts):
total_len = 0
original_wer = 0.0
oracle_wer = 0.0
topk_wer = [0.0] * len(TOP_K)
for sample, transcription in ref_dict.items():
transcription_len = len(transcription)
total_len += transcription_len
# if sample not in res_dict:
# continue
hyps = res_dict[sample]
hyps = sorted(hyps, key=lambda x: -x["decoder_score"])
# Original
original_order = hyps
original_wer += original_order[0]["wer"] * transcription_len
# Oracle
oracle_order = sorted(hyps, key=lambda x: x["wer"])
oracle_wer += oracle_order[0]["wer"] * transcription_len
# Top K
for i, k in enumerate(TOP_K):
order = sorted(hyps[:k], key=lambda x: -score(x, wts))
topk_wer[i] += order[0]["wer"] * transcription_len
return {
"original_wer": original_wer / total_len,
"oracle_wer": oracle_wer / total_len,
"topk_wer": [w / total_len for w in topk_wer],
"best_wts_trail": wts,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--hyp", type=str, required=True, help="candidates beam dump path"
)
parser.add_argument(
"--list", type=str, required=True, help="data list with original transcriptions"
)
parser.add_argument("--in_wts", type=str, required=True, help="weights to eval")
parser.add_argument("--convlm", type=str, default="", help="convlm ppl file path")
parser.add_argument("--tr", type=str, default="", help="transformer ppl file path")
parser.add_argument(
"--search",
action="store_true",
help="search or not optimal weights of rescoring",
)
parser.add_argument(
"--top",
type=str,
default="small",
help="large beam or not, defines the topk set",
)
parser.add_argument(
"--gridsearch",
action="store_true",
help="use grid search instead of random search",
)
args = parser.parse_args()
if args.top == "large":
TOP_K = [2, 10, 100, 500, 1000, 2500]
else:
TOP_K = [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 100, 200, 250]
with open(args.list) as f:
for line in f:
data = line.strip().split()
ref_dict[data[0]] = data[3:]
lm_files = {
key: open(name, "r")
for name, key in zip([args.convlm, args.tr], ["convlm", "tr"])
if name != ""
}
with open(args.hyp, "r") as f:
for line in f:
data = line.strip().split("|")
# sample ID, decoder score, am score, lm score, wer
non_transcription = data[:5]
audio_id = non_transcription[0].strip()
transcription = data[5].strip().split(" ")
values_dict = {
"wer": float(non_transcription[4]),
"decoder_score": float(non_transcription[1]),
"am_score": float(non_transcription[2]),
"lm_score": float(non_transcription[3]),
"wl_len": len(transcription) + len("".join(transcription)),
}
for key in ["convlm", "tr"]:
if key in lm_files:
values_dict[key] = float(
lm_files[key].readline().strip().split(" ")[1]
)
else:
values_dict[key] = 0.0
res_dict[audio_id].append(values_dict)
for f_d in lm_files.values():
f_d.close()
best_wts = {
name: float(val)
for val, name in zip(args.in_wts.split(","), ["tr", "convlm", "len"])
}
if args.search:
print("searching", flush=True)
min_wer = 100
weights = []
if args.gridsearch:
# w1: tr LM weight
# w2: convlm weight
# w3: word score
for w1 in [i for i in np.arange(0.0, 1.0, 0.1)]:
for w2 in [i for i in np.arange(-0.3, 0.3, 0.1)]:
for w3 in [i for i in np.arange(0.0, 1.0, 0.1)]:
weights.append({"tr": w1, "convlm": w2, "len": w3})
else:
for _ in range(1000):
weights.append(
{
"tr": np.random.rand() * 2.5,
"convlm": (np.random.rand() - 0.5) * 2,
"len": (np.random.rand() - 0.5) * 6,
}
)
num_tries = len(weights)
print("Total number of search points", num_tries)
threads = 50
pool = Pool(threads)
results = pool.map(compute, weights)
pool.close()
pool.join()
assert len(results) == len(weights)
for result in results:
if min(result["topk_wer"]) < min_wer:
min_wer = min(result["topk_wer"])
best_wts = result["best_wts_trail"]
print(best_wts, min_wer)
best_result = compute(best_wts)
print("| Original WER", best_result["original_wer"])
print("| Oracle WER", best_result["oracle_wer"])
for i, k in enumerate(TOP_K):
print("| Top-{} rescored WER".format(k), best_result["topk_wer"][i])

wav2letter-main/recipes/sota/2019/lm/prepare_wp_data.py

"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to prepare word-piece data for lm training
Command : python3 prepare.py --data_src [...] --model_src [...]
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import sentencepiece as spm
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="LM data preparation.")
parser.add_argument("--data_src", help="librispeech data")
parser.add_argument("--model_src", help="model auxilary files directory")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(
os.path.join(args.model_src, "am", "librispeech-train-all-unigram-10000.model")
)
for name, suffix in zip(
["librispeech-lm-norm.txt.lower.shuffle", "dev-clean.txt", "dev-other.txt"],
["train", "dev-clean", "dev-other"],
):
with open(os.path.join(args.data_src, "text", name), "r") as fin, open(
os.path.join(args.model_src, "decoder/lm_wp_10k." + suffix), "w"
) as fout:
for line in fin:
result = ""
for word in line.strip().split(" "):
wps = sp.NBestEncodeAsPieces(word, 1)[0]
result += " ".join([w.replace("\u2581", "_") for w in wps]) + " "
fout.write("{}\n".format(result.strip()))

wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/generate_uniq.py

import sys
# Deduplicate the lines of the input file and write the unique set
# (order not preserved) to "<input>.unique".
pl_data = set()
with open(sys.argv[1], "r") as f:
    for line in f:
        pl_data.add(line.strip())
with open(sys.argv[1] + ".unique", "w") as f:
    for elem in pl_data:
        f.write(elem + "\n")
| 240 | 17.538462 | 45 |
py
|
wav2letter
|
wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/dump_title.py
|
import sys
# Echo each stdin line followed by a blank line.
for line in sys.stdin:
    print(line.strip() + "\n")
| 81 | 12.666667 | 36 |
py
|
wav2letter
|
wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/preprocessing.py
|
import re
import sys
time_degree = {
"min.": "minutes",
"deg.": "degrees",
"sec.": "seconds",
"hrs.": "hours",
"hr.": "hour",
}
abbr_mapping = {
"mr.": "mister",
"mr": "mister",
"dr.": "doctor",
"dr": "doctor",
"ms.": "miss",
"ms": "miss",
"mrs.": "missus",
"mrs": "missus",
"vs.": "versus",
"vs": "versus",
"st.": "saint",
"st": "saint",
}
numbers = set("0123456789")
time_set1 = set(":0123456789")
time_set2 = set("/0123456789")
for _, line in enumerate(sys.stdin):
line = line.strip()
line = re.sub(" +", " ", line).strip()
new_line = []
prev_word = ""
for word in line.split():
if (
word.lower() in time_degree
and len(set.intersection(numbers, set(prev_word))) > 0
):
new_line.append(time_degree[word.lower()])
elif len(set(word) - time_set1) == 0:
for part in word.split(":"):
new_line.append(part)
elif len(set(word) - time_set2) == 0:
for part in word.split("/"):
new_line.append(part)
elif word.lower() in abbr_mapping:
new_line.append(abbr_mapping[word.lower()])
elif "&c" in word:
new_line.append(word.replace("&c", " et cetera "))
else:
new_line.append(word)
prev_word = word
print(" ".join(new_line))
| 1,401 | 24.490909 | 66 |
py
|
wav2letter
|
wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/dump.py
|
import sys
# Join consecutive lines with spaces; a blank input line ends the current
# merged line and starts a new one. prev_line is seeded non-empty so the
# first output line does not begin with a newline.
prev_line = "hello world"
for line in sys.stdin:
    line = line.strip()
    if prev_line != "":
        print(line, end=" ")
    else:
        print("\n" + line, end=" ")
    prev_line = line
| 248 | 19.75 | 36 |
py
|
wav2letter
|
wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/generate_frequencies.py
|
import sys
from collections import defaultdict
if __name__ == "__main__":
words_dict = defaultdict(int)
path = sys.argv[1]
out_path = path + ".freq"
with open(path, "r") as f:
for line in f:
for word in line.strip().split():
words_dict[word] += 1
with open(out_path, "w") as fout:
for word, count in sorted(
words_dict.items(), key=lambda kv: kv[1], reverse=True
):
fout.write("{} {}\n".format(word, count))
| 506 | 27.166667 | 66 |
py
|
wav2letter
|
wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/generate_kenlm_vocab.py
|
import sys
if __name__ == "__main__":
path = sys.argv[1]
vocab_size = int(sys.argv[2])
out_path = path + ".kenlm.{}vocab".format(sys.argv[3])
with open(path, "r") as f, open(out_path, "w") as fout:
for index, line in enumerate(f):
if index >= vocab_size:
break
fout.write(line.strip().split(" ")[0] + " ")
| 372 | 27.692308 | 59 |
py
|
wav2letter
|
wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/postprocessing_title.py
|
import re
import string
import sys
import unicodedata
LOWER_LETTERS = set(string.ascii_lowercase)
ACCEPTED_LETTERS = set.union(LOWER_LETTERS, set("0123456789"), {"'"})
if __name__ == "__main__":
for text in sys.stdin:
        # keep only ASCII symbols
nfkd_form = unicodedata.normalize("NFKD", text.strip())
nfkd_text = u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
# lowercase text
nfkd_text = nfkd_text.lower()
# remove hyphen
nfkd_text = nfkd_text.replace("-", " ")
# change & -> and
nfkd_text = nfkd_text.replace("&", " and ")
nfkd_text = re.sub(" +", " ", nfkd_text).strip()
        # keep words with at least one letter that contain only allowed tokens;
        # otherwise strip the disallowed characters or skip the word
cleaned_text = []
for word in nfkd_text.split(" "):
word = word.lower()
if len(set(word).intersection(ACCEPTED_LETTERS)) > 0:
# add word if it contains acceptable tokens
if len(set(word) - ACCEPTED_LETTERS) == 0:
cleaned_text.append(word)
                # keep abbreviation-like words that contain a dot, e.g. dr., ms.
                # (the dot itself is kept in this branch)
elif "." in word and len(word) > 1:
cleaned_text.append(word)
else:
cleaned_text.append(
"".join([letter for letter in word if letter in ACCEPTED_LETTERS])
)
# merge ' for the case ...s'
elif word == "'":
if (
len(cleaned_text) > 0
and len(cleaned_text[-1]) > 1
and cleaned_text[-1][-1] == "s"
and cleaned_text[-1][-2] != "'"
):
cleaned_text[-1] += word
cleaned_text = " ".join(cleaned_text)
# remove extra whitespaces
cleaned_text = re.sub(" +", " ", cleaned_text).strip()
# check if text is empty
if len(cleaned_text) == 0:
continue
# merge '... with its word
final_text = []
for word in cleaned_text.split(" "):
if word[0] != "'":
final_text.append(word)
else:
if len(final_text) > 0:
final_text[-1] += word
else:
final_text.append(word)
print(" ".join(final_text).strip())
| 2,461 | 36.30303 | 90 |
py
|
wav2letter
|
wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/postprocessing.py
|
import re
import string
import sys
import unicodedata
import num2words
import roman
LOWER_LETTERS = set(string.ascii_lowercase)
NUMBERS = set("0123456789,")
ROMANS = set("IVXLCDM")
ACCEPTED_LETTERS = set.union(LOWER_LETTERS, {"'"})
PUNCTUATION = set(".,()[]!?")
abbr_mapping = {
"mr.": "mister",
"mr": "mister",
"dr.": "doctor",
"dr": "doctor",
"ms.": "miss",
"ms": "miss",
"mrs.": "missus",
"mrs": "missus",
"vs.": "versus",
"vs": "versus",
"st.": "saint",
"st": "saint",
}
if __name__ == "__main__":
for text in sys.stdin:
        # keep only ASCII symbols
nfkd_form = unicodedata.normalize("NFKD", text.strip())
nfkd_text = u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
# remove hyphen
nfkd_text = nfkd_text.replace("-", " ")
# change & -> and
nfkd_text = nfkd_text.replace("&", " and ")
nfkd_text = re.sub(" +", " ", nfkd_text).strip()
        # keep words with at least one letter that contain only allowed tokens;
        # otherwise skip the word
        cleaned_text = []
        splitted_text = nfkd_text.split(" ")
        # "$" sets this flag so that the following number is read as a dollar
        # amount; it must be initialized before the loop to avoid an unbound
        # reference on the first numeric word
        add_dollar = False
        for index, word in enumerate(splitted_text):
if word == "":
continue
# convert roman numbers
if len(set(word) - ROMANS) == 0 and (
word != "I"
or (
word == "I"
and index > 0
and (
splitted_text[index - 1] == "Chapter"
or splitted_text[index - 1] == "CHAPTER"
)
)
):
try:
word = str(roman.fromRoman(word))
except Exception:
pass
elif (
len(set(word[:-1]) - ROMANS) == 0
and len(word) > 1
and word[-1] in PUNCTUATION
):
try:
word = str(roman.fromRoman(word[:-1]))
except Exception:
pass
# lowercase text
word = word.lower()
# process dollars
if word == "$":
add_dollar = True
cleaned_text.append("dollars")
continue
if word[0] == "$" and len(word) > 1:
assert 1 == 0, word
# preserve numbers
if len(set(word) - NUMBERS) == 0 and word != ",":
word = word.replace(",", "")
if not add_dollar:
cleaned_text.append(
num2words.num2words(int(word), to="year")
.replace(" oh-", " o ")
.replace("-", " ")
.replace(",", "")
)
if add_dollar:
cleaned_text[-1] = (
num2words.num2words(int(word), to="cardinal")
.replace("-", " ")
.replace(",", "")
)
cleaned_text.append("dollars")
add_dollar = False
continue
add_dollar = False
if (
len(word) > 2
and len(set(word[:-2]) - NUMBERS) == 0
and (
word[-2:] == "th"
or word[-2:] == "st"
or word[-2:] == "nd"
or word[-2:] == "rd"
)
):
cleaned_text.append(
num2words.num2words(int(word[:-2].replace(",", "")), to="ordinal")
.replace("-", " ")
.replace(",", "")
)
continue
if word in abbr_mapping:
cleaned_text.append(abbr_mapping[word])
elif len(set(word).intersection(LOWER_LETTERS)) > 0:
# add word if it contains acceptable tokens
if len(set(word) - ACCEPTED_LETTERS) == 0:
cleaned_text.append(word)
                # keep a word whose last character is "." (dropping the dot),
                # e.g. dr., ms.
elif len(set(word[:-1]) - ACCEPTED_LETTERS) == 0 and word[-1] == ".":
cleaned_text.append(word[:-1])
# merge ' for the case ...s'
elif word == "'":
if (
len(cleaned_text) > 0
and len(cleaned_text[-1]) > 1
and cleaned_text[-1][-1] == "s"
and cleaned_text[-1][-2] != "'"
):
cleaned_text[-1] += word
cleaned_text = " ".join(cleaned_text)
# remove extra whitespaces
cleaned_text = re.sub(" +", " ", cleaned_text).strip()
# check if text is empty
if len(cleaned_text) == 0:
continue
# merge '... with its word
final_text = []
for word in cleaned_text.split(" "):
if word[0] != "'":
final_text.append(word)
else:
if len(final_text) > 0:
final_text[-1] += word
final_text = " ".join(final_text).strip()
if final_text == "":
continue
print(final_text)
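# Worked example (hypothetical line, traced by hand):
#   in : "Chapter II He paid $ 200 on the 3rd"
#   out: "chapter two he paid two hundred dollars on the third"
# "II" is read as a roman numeral, "$ 200" becomes a cardinal plus "dollars",
# and "3rd" becomes an ordinal via num2words.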
| 5,348 | 33.288462 | 89 |
py
|
wav2letter
|
wav2letter-main/recipes/sota/2019/lm_corpus_and_PL_generation/skip_paragraph.py
|
import sys
# Drop "<P>" paragraph markers; print every other line unchanged.
for line in sys.stdin:
    line = line.strip()
    if line != "<P>":
        print(line)
| 143 | 13.4 | 36 |
py
|
wav2letter
|
wav2letter-main/recipes/data/fisher/utils.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sox
def find_files(src):
src_dirs = src.split(",")
required_dirs = [
"fe_03_p1_sph1",
"fe_03_p1_sph3",
"fe_03_p1_sph5",
"fe_03_p1_sph7",
"fe_03_p2_sph1",
"fe_03_p2_sph3",
"fe_03_p2_sph5",
"fe_03_p2_sph7",
"fe_03_p1_sph2",
"fe_03_p1_sph4",
"fe_03_p1_sph6",
"fe_03_p2_sph2",
"fe_03_p2_sph4",
"fe_03_p2_sph6",
"fe_03_p1_tran",
"fe_03_p2_tran",
]
dir_mapping = {}
for dir in src_dirs:
for curdir in os.listdir(dir):
fulldir = os.path.join(dir, curdir)
if not os.path.isdir(fulldir):
continue
for req_dir in required_dirs:
new_style_req_dir = req_dir.replace(
"fe_03_p1_sph", "fisher_eng_tr_sp_d"
)
if curdir == req_dir or curdir == new_style_req_dir:
dir_mapping[req_dir] = fulldir
continue
transcript_files = {}
audio_files = {}
for dir in required_dirs:
assert dir in dir_mapping, "could not find the subdirectory {}".format(dir)
fulldir = dir_mapping[dir]
if "tran" in fulldir:
fulldir = os.path.join(fulldir, "data")
for dirpath, _, filenames in os.walk(fulldir):
for filename in filenames:
key = filename.split(".")[0]
if filename.startswith("fe_") and filename.endswith(".txt"):
transcript_files[key] = os.path.join(dirpath, filename)
elif filename.endswith(".sph"):
audio_files[key] = os.path.join(dirpath, filename)
return [(audio_files[k], transcript_files[k]) for k in audio_files]
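# find_files pairs each .sph audio file with its transcript via the shared
# file stem, yielding tuples like (hypothetical paths):
#   ("/data/fisher_eng_tr_sp_d1/audio/000/fe_03_00001.sph",
#    "/data/fe_03_p1_tran/data/trans/000/fe_03_00001.txt")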
def process_fisher_data(sample_data):
files, _, audio_path, sph2pipe = sample_data
sphfile, tfile = files
tmp_files = {}
for channel in ["A", "B"]:
tmp_files[channel] = os.path.join(
audio_path, "{pid}_tmp_{ch}.wav".format(pid=os.getpid(), ch=channel)
)
os.system(
"{sph} -f wav -c {c} {i} {o}".format(
sph=sph2pipe,
c=1 if channel == "A" else 2,
i=sphfile,
o=tmp_files[channel],
)
)
idx = 0
lines = []
with open(tfile, "r") as f:
first_line = f.readline().strip()
assert first_line.startswith("#") and first_line.endswith(".sph")
audiofileid = first_line.replace("#", "").replace(".sph", "").strip()
cur_audio_path = os.path.join(audio_path, audiofileid)
os.makedirs(cur_audio_path, exist_ok=True)
for line in f:
if line.startswith("#") or not line.strip():
continue
tag, text = line.strip().split(":", 1)
start, end, channel = tag.split()
start = float(start)
end = float(end)
utt = "{a}-{c}-{s}-{e}".format(
a=audiofileid,
c=channel,
s="{:06d}".format(int(start * 100 + 0.5)),
e="{:06d}".format(int(end * 100 + 0.5)),
)
# ignore uncertain annotations
if "((" in text:
continue
# lower-case
text = text.lower()
# remove punctuation
text = text.replace("?", "")
text = text.replace(",", "")
# simplify noise annotations
text = text.replace("[[skip]]", "")
text = text.replace("[pause]", "")
text = text.replace("[laugh]", "[laughter]")
text = text.replace("[sigh]", "[noise]")
text = text.replace("[cough]", "[noise]")
text = text.replace("[mn]", "[noise]")
text = text.replace("[breath]", "[noise]")
text = text.replace("[lipsmack]", "[noise]")
text = text.replace("[sneeze]", "[noise]")
text = " ".join(text.split())
out_file = os.path.join(cur_audio_path, "{:09d}.flac".format(idx))
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(
file_type="flac", encoding="signed-integer", bits=16
)
sox_tfm.trim(start, end)
sox_tfm.build(tmp_files[channel], out_file)
duration = (end - start) * 1000.0
idx = idx + 1
lines.append("\t".join([utt, out_file, "{0:.2f}".format(duration), text]))
# cleanup
for tmp in tmp_files.values():
os.remove(tmp)
return lines
| 4,894 | 32.074324 | 86 |
py
|
wav2letter
|
wav2letter-main/recipes/data/fisher/prepare.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to package original Fisher datasets into a form readable in
wav2letter++ pipelines
Command : python3 prepare.py --dst [...]
Replace [...] with appropriate path
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
from multiprocessing import Pool
import numpy
from tqdm import tqdm
from utils import find_files, process_fisher_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Fisher Dataset creation.")
parser.add_argument(
"--src",
help="comma-separated directories containing Fisher data -"
"/path/to/LDC2004T19,/path/to/LDC2005T19,"
"/path/to/LDC2004S13,/path/to/LDC2005S13",
)
parser.add_argument(
"--dst", help="destination directory where to store data", default="./fisher"
)
parser.add_argument(
"--sph2pipe",
help="path to sph2pipe executable",
default="./sph2pipe_v2.5/sph2pipe",
)
parser.add_argument(
"-p",
"--process",
help="number of process for multiprocessing",
default=8,
type=int,
)
args = parser.parse_args()
files = find_files(args.src)
assert len(files) == 11699, (
"Expected to find 11699 .sph and transcript files in the Fisher "
"data, found {}".format(len(files))
)
audio_path = os.path.join(args.dst, "audio")
text_path = os.path.join(args.dst, "text")
lists_path = os.path.join(args.dst, "lists")
os.makedirs(audio_path, exist_ok=True)
os.makedirs(text_path, exist_ok=True)
os.makedirs(lists_path, exist_ok=True)
n_samples = len(files)
with Pool(args.process) as p:
processed_lines = list(
tqdm(
p.imap(
process_fisher_data,
zip(
files,
numpy.arange(n_samples),
[audio_path] * n_samples,
[args.sph2pipe] * n_samples,
),
),
total=n_samples,
)
)
processed_lines_flat = [item for sublist in processed_lines for item in sublist]
with open(os.path.join(lists_path, "fisher.lst"), "w") as ffile:
ffile.write("\n".join([l for l in processed_lines_flat if l]))
| 2,563 | 28.471264 | 85 |
py
|
wav2letter
|
wav2letter-main/recipes/data/switchboard/utils.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import sox
def process_hub5_data(sample_data):
line, idx, hub5_sdir, hub5_audio_path, sph2pipe = sample_data
if (not line) or line.startswith(";;") or ("IGNORE_TIME_SEGMENT_" in line):
return None
parts = line.strip().split()
transcript = " ".join(parts[6:])
transcript = transcript.replace("((", "(")
transcript = transcript.replace("<B_ASIDE>", "")
transcript = transcript.replace("<A_ASIDE>", "")
spk = "{}-{}".format(parts[0], parts[1])
start = float(parts[3])
end = float(parts[4])
utt = "{u}_{s}-{e}".format(
u=spk, s="{:06d}".format(int(start * 100)), e="{:06d}".format(int(end * 100))
)
in_file = os.path.join(hub5_sdir, "english", parts[0] + ".sph")
out_file = os.path.join(hub5_audio_path, "{:09d}.flac".format(idx))
tmp_file = os.path.join(hub5_audio_path, "{pid}_tmp.wav".format(pid=os.getpid()))
os.system(
"{sph} -f wav -c {c} {i} {o}".format(
sph=sph2pipe, c=1 if parts[1] == "A" else 2, i=in_file, o=tmp_file
)
)
assert (
sox.file_info.duration(tmp_file) > 0
), "Audio file {} duration is zero.".format(in_file)
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(file_type="flac", encoding="signed-integer", bits=16)
sox_tfm.trim(start, end)
sox_tfm.build(tmp_file, out_file)
os.remove(tmp_file)
duration = (end - start) * 1000.0
return "\t".join([utt, out_file, "{0:.2f}".format(duration), transcript.lower()])
def normalize_acronyms(line, acronym_dict):
# Taken from https://git.io/fjhbu
# Original Author - Minhua Wu
dict_acronym = {}
dict_acronym_noi = {} # Mapping of acronyms without I, i
for k, v in acronym_dict.items():
dict_acronym[k] = v.strip()
dict_acronym_noi[k] = v.strip()
del dict_acronym_noi["i"]
del dict_acronym_noi["I"]
line = "<dummy-id> " + line.strip()
items = line.split()
L = len(items)
# First pass mapping to map I as part of acronym
for i in range(L):
if items[i] == "i":
x = 0
while i - 1 - x >= 0 and re.match(r"^[A-Z]$", items[i - 1 - x]):
x += 1
y = 0
while i + 1 + y < L and re.match(r"^[A-Z]$", items[i + 1 + y]):
y += 1
if x + y > 0:
for bias in range(-x, y + 1):
items[i + bias] = dict_acronym[items[i + bias]]
# Second pass mapping (not mapping 'i' and 'I')
for i in range(len(items)):
if items[i] in dict_acronym_noi.keys():
items[i] = dict_acronym_noi[items[i]]
return " ".join(items[1:])
def sanitize(transcript, acronym_dict):
cleaned_words = ""
for word in transcript.split():
# Remove silence
word = word.replace("[silence]", "")
# Remove <b_aside>, <e_aside> (background conversation indicators)
word = word.replace("<b_aside>", "")
word = word.replace("<e_aside>", "")
# Use special noise symbol for [vocalized-noise].
# NOTE: Kaldi doesn't do this
word = word.replace("[vocalized-noise]", "[noise]")
        # For a word containing laughter, replace [laughter-word] with word
        # (such words are still properly understood);
        # also handles cases like [laughter-ou[r]-]
word = re.sub(r"(-?)\[laughter\-([\S]+)\](-?)", r"\1\2\3", word)
        # for anomalous words like [Bamorghini/Lamborghini], keep the first
        # variant as it better matches the pronunciation
word = re.sub(r"\[(\S+)\/\S+\]", r"\1", word)
# handle an incorrect input: 'ex[specially]-/especially]'
word = re.sub("ex.specially...especially.", "ex-", word)
# For partial word like -[Substi]tute use '-tute' in word transcription
word = re.sub(r"ammu\[n\]it", r"ammu-it", word) # handle case 'ammu[n]it[ion]-'
word = re.sub(r"\-\[[^\]\s]+\]", r"-", word)
word = re.sub(r"\[[^\[\s]+\]\-", r"-", word)
# for coinages like {DJed}, {yuppyish} remove curly braces around them
word = re.sub(r"[\{\}]+", r"", word)
# For common alternate pronunciations like about_1 -> b aw t, them_1 eh m,
# remove '_1'
word = re.sub(r"_\d$", r"", word)
word = re.sub(r"them_1's", r"them's", word) # handle case 'them_1's'
cleaned_words += word + " "
# Normalize acronyms to Fisher format BBC -> b._b._c.
return normalize_acronyms(cleaned_words, acronym_dict)
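def _sanitize_example():
    # Hedged illustration, never called by the pipeline: the acronym dict
    # below is a hypothetical stand-in for entries from acronyms_swbd.map.
    toy_acronyms = {"i": "i", "I": "i"}
    # Folds laughter markup, maps [vocalized-noise] to [noise], and strips
    # the "_1" alternate-pronunciation suffix:
    return sanitize("[laughter-okay] [vocalized-noise] them_1", toy_acronyms)
    # -> "okay [noise] them"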
def process_swbd_data(sample_data):
data, _, swbd_audio_path, sph2pipe, acronym_dict = sample_data
id, sphfile, chA, chB = data
tmp_file = os.path.join(swbd_audio_path, "{pid}_tmp.wav".format(pid=os.getpid()))
cur_audio_path = os.path.join(swbd_audio_path, id)
os.makedirs(cur_audio_path, exist_ok=True)
idx = 0
lines = []
for channel in ["A", "B"]:
os.system(
"{sph} -f wav -c {c} {i} {o}".format(
sph=sph2pipe, c=1 if channel == "A" else 2, i=sphfile, o=tmp_file
)
)
assert (
sox.file_info.duration(tmp_file) > 0
), "Audio file {} duration is zero.".format(sphfile)
with open(chA if channel == "A" else chB, "r") as f:
for line in f:
name = line[0:6].replace("sw", "sw0")
channel = line[6]
splits = line.strip().split(" ", 3)
start = float(splits[1])
end = float(splits[2])
transcript = sanitize(splits[3], acronym_dict)
if not transcript:
continue
utt = "{n}-{c}_{s}-{e}".format(
n=name,
c=channel,
s="{:06d}".format(int(start * 100 + 0.5)),
e="{:06d}".format(int(end * 100 + 0.5)),
)
out_file = os.path.join(cur_audio_path, "{:09d}.flac".format(idx))
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(
file_type="flac", encoding="signed-integer", bits=16
)
sox_tfm.trim(start, end)
sox_tfm.build(tmp_file, out_file)
duration = (end - start) * 1000.0
idx = idx + 1
lines.append(
"\t".join(
[utt, out_file, "{0:.2f}".format(duration), transcript.lower()]
)
)
os.remove(tmp_file)
return lines
| 6,637 | 37.593023 | 88 |
py
|
wav2letter
|
wav2letter-main/recipes/data/switchboard/prepare.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to package Switchboard, Hub05 datasets into a form readable in
wav2letter++ pipelines
Command : python3 prepare.py [-h] [--src SRC] [--dst DST] [--hub5_sdir HUB5_SDIR]
[--hub5_tdir HUB5_TDIR] [--sph2pipe SPH2PIPE] [-p PROCESS]
Replace [...] with appropriate path
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import sys
from multiprocessing import Pool
import numpy
from tqdm import tqdm
from utils import process_hub5_data, process_swbd_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Switchboard Dataset creation.")
parser.add_argument(
"--src",
help="path to directory containing Switchboard data - /path/to/LDC97S62,",
)
parser.add_argument(
"--dst", help="destination directory where to store data", default="./swbd"
)
parser.add_argument(
"--hub5_sdir",
default=None,
help="path to hub dataset containing speech data - /path/to/LDC2002S09/"
"('<hub5_sdir>/english' must exist)",
)
parser.add_argument(
"--hub5_tdir",
default=None,
help="path to hub dataset containing transcript data "
" - /path/to/LDC2002T43. ('<hub5_tdir>/reference' must exist)",
)
parser.add_argument(
"--sph2pipe",
help="path to sph2pipe executable",
default="./sph2pipe_v2.5/sph2pipe",
)
parser.add_argument(
"-p",
"--process",
help="number of process for multiprocessing",
default=8,
type=int,
)
args = parser.parse_args()
assert os.path.exists(args.sph2pipe), "sph2pipe not found '{d}'".format(
d=args.sph2pipe
)
audio_path = os.path.join(args.dst, "audio")
os.makedirs(audio_path, exist_ok=True)
text_path = os.path.join(args.dst, "text")
os.makedirs(text_path, exist_ok=True)
lists_path = os.path.join(args.dst, "lists")
os.makedirs(lists_path, exist_ok=True)
misc_path = os.path.join(args.dst, "misc")
os.makedirs(misc_path, exist_ok=True)
# hub dataset preparation
if args.hub5_tdir and args.hub5_sdir:
print("Preparing Hub'05 data ...", flush=True)
hub5_audio_path = os.path.join(audio_path, "hub05")
os.makedirs(hub5_audio_path, exist_ok=True)
stm = os.path.join(args.hub5_tdir, "reference", "hub5e00.english.000405.stm")
lines = [line.strip() for line in open(stm, "r")]
n_samples = len(lines)
with Pool(args.process) as p:
processed_lines = list(
tqdm(
p.imap(
process_hub5_data,
zip(
lines,
numpy.arange(n_samples),
[args.hub5_sdir] * n_samples,
[hub5_audio_path] * n_samples,
[args.sph2pipe] * n_samples,
),
),
total=n_samples,
)
)
with open(os.path.join(lists_path, "hub05-switchboard.lst"), "w") as sfile:
sfile.write(
"\n".join([l for l in processed_lines if l and l.startswith("sw")])
)
with open(os.path.join(lists_path, "hub05-callhome.lst"), "w") as cfile:
cfile.write(
"\n".join([l for l in processed_lines if l and l.startswith("en")])
)
else:
print(
"--hub5_tdir and/or --hub5_sdir is empty. Not preparing Hub'05 data.",
flush=True,
)
print("Preparing Switchboard data ...", flush=True)
swbd_audio_path = os.path.join(audio_path, "switchboard")
os.makedirs(swbd_audio_path, exist_ok=True)
swbd_trans_path = os.path.join(misc_path, "swb_ms98_transcriptions")
if not os.path.exists(swbd_trans_path):
os.system(
"wget -qO- http://www.openslr.org/resources/5/"
"switchboard_word_alignments.tar.gz "
"| tar xz -C {dir}".format(dir=misc_path)
)
# load acronyms
acronym_dict = {}
with open(os.path.join(sys.path[0], "acronyms_swbd.map"), "r") as f:
for line in f:
a, b = line.strip().split("\t")
acronym_dict[a] = b
data = {}
for dirpath, _, filenames in os.walk(swbd_trans_path):
for filename in filenames:
if filename.endswith("-trans.text"):
id = filename[2:6] # Guaranteed to be id by swb manual
if id not in data:
data[id] = [id, None, None, None]
channel = filename[6]
if channel == "A":
data[id][2] = os.path.join(dirpath, filename)
if channel == "B":
data[id][3] = os.path.join(dirpath, filename)
for dirpath, _, filenames in os.walk(args.src):
for filename in filenames:
if filename.endswith(".sph"):
id = filename.replace("sw0", "")[:4]
assert id in data
data[id][1] = os.path.join(dirpath, filename)
n_samples = len(data)
with Pool(args.process) as p:
processed_lines = list(
tqdm(
p.imap(
process_swbd_data,
zip(
data.values(),
numpy.arange(n_samples),
[swbd_audio_path] * n_samples,
[args.sph2pipe] * n_samples,
[acronym_dict] * n_samples,
),
),
total=n_samples,
)
)
processed_lines_flat = [item for sublist in processed_lines for item in sublist]
with open(os.path.join(lists_path, "switchboard.lst"), "w") as sfile:
sfile.write("\n".join([l for l in processed_lines_flat if l]))
| 6,182 | 32.786885 | 85 |
py
|
wav2letter
|
wav2letter-main/data/timit/prepare.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to package original Timit dataset into a form readable in
wav2letter++ pipelines
Please install `sph2pipe` on your own -
see https://www.ldc.upenn.edu/language-resources/tools/sphere-conversion-tools \
with commands :
wget https://www.ldc.upenn.edu/sites/www.ldc.upenn.edu/files/ctools/sph2pipe_v2.5.tar.gz
tar -xzf sph2pipe_v2.5.tar.gz && cd sph2pipe_v2.5
gcc -o sph2pipe *.c -lm
Command : python3 prepare.py --src [...]/timit --dst [...] \
--sph2pipe [...]/sph2pipe_v2.5/sph2pipe
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
from multiprocessing import Pool
import numpy
import sox
from tqdm import tqdm
def copy_to_flac(info):
src, name, dst, idx, sph2pipe = info
original_path = os.path.join(src, name)
path = os.path.join(dst, "%09d" % idx) + ".flac"
if not os.path.exists(path):
tmp_file = os.path.join(dst, "{pid}_tmp.wav".format(pid=os.getpid()))
os.system(
"{sph} -f wav {i} {o}".format(sph=sph2pipe, i=original_path, o=tmp_file)
)
assert (
sox.file_info.duration(tmp_file) > 0
), "Audio file {} duration is zero.".format(original_path)
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(file_type="flac", encoding="signed-integer", bits=16)
sox_tfm.build(tmp_file, path)
os.remove(tmp_file)
    duration = sox.file_info.duration(path) * 1000  # milliseconds
transcripts = dict()
for target_type in [".PHN", ".WRD"]:
targets = []
target_file = original_path.replace(".WAV", target_type)
with open(target_file, "r") as f:
for line in f:
start, end, token = line.strip().split()
assert start and end and token, "Something wrong with {} file".format(
target_file
)
targets.append(token)
transcripts[target_type] = " ".join(targets)
return (name, path, duration, transcripts[".WRD"], transcripts[".PHN"])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Timit Dataset creation.")
parser.add_argument(
"--src", help="Source directory with downloaded and unzipped TIMIT data"
)
parser.add_argument("--dst", help="destination directory", default="./timit")
parser.add_argument(
"-p", "--process", help="# of process for Multiprocessing", default=8, type=int
)
parser.add_argument(
"--sph2pipe",
help="path to sph2pipe executable",
default="./sph2pipe_v2.5/sph2pipe",
)
args = parser.parse_args()
assert os.path.isdir(str(args.src)), "Timit directory is not found - '{d}'".format(
d=args.src
)
assert os.path.exists(args.sph2pipe), "sph2pipe not found '{d}'".format(
d=args.sph2pipe
)
current_dir = os.path.dirname(__file__)
audio_path = os.path.join(args.dst, "audio")
text_path = os.path.join(args.dst, "text")
lists_path = os.path.join(args.dst, "lists")
os.makedirs(audio_path, exist_ok=True)
os.makedirs(text_path, exist_ok=True)
os.makedirs(lists_path, exist_ok=True)
# read phone tokens
phones = []
in_phn_path = os.path.join(current_dir, "phones.txt")
with open(in_phn_path, "r") as f_phones:
phones = [[tkn.strip() for tkn in line.split()] for line in f_phones]
phones = set(numpy.concatenate(phones))
assert (
len(phones) == 61
), "Wrong number of phones, should be 61 instrad of {}".format(len(phones))
assert os.path.exists(os.path.join(args.src, "timit")) or os.path.exists(
os.path.join(args.src, "TIMIT")
), "TIMIT data are corrupted, there is no TIMIT or timit subdirectory"
upper_case = True if os.path.exists(os.path.join(args.src, "TIMIT")) else False
def process_path(path, upper_case):
return path.upper() if upper_case else path
# prepare audio, text and lists
for ds_type in ["train", "valid", "test"]:
print("Writing TIMIT {} data part".format(ds_type), flush=True)
data_list = os.path.join(current_dir, ds_type + ".lst")
with open(data_list, "r") as f_paths:
src_audio_files = [
process_path(os.path.join("timit", fname.strip()), upper_case)
for fname in f_paths
if fname.strip() != ""
]
ds_dst = os.path.join(audio_path, ds_type)
os.makedirs(ds_dst, exist_ok=True)
n_samples = len(src_audio_files)
with Pool(args.process) as p:
samples_info = list(
tqdm(
p.imap(
copy_to_flac,
zip(
[args.src] * n_samples,
src_audio_files,
[ds_dst] * n_samples,
numpy.arange(n_samples),
[args.sph2pipe] * n_samples,
),
),
total=n_samples,
)
)
with open(
os.path.join(lists_path, "{}.phn.lst".format(ds_type)), "w"
) as flist, open(
os.path.join(lists_path, "{}.lst".format(ds_type)), "w"
) as fwlist, open(
os.path.join(text_path, "{}.phn.txt".format(ds_type)), "w"
) as ftlist, open(
os.path.join(text_path, "{}.txt".format(ds_type)), "w"
) as ftwlist:
for sample in samples_info:
flist.write(
"{}\t{}\t{}\t{}\n".format(
sample[0], sample[1], sample[2], sample[4]
)
)
fwlist.write(
"{}\t{}\t{}\t{}\n".format(
sample[0], sample[1], sample[2], sample[3]
)
)
assert (
len(set(sample[4].split(" ")) - phones) == 0
), "Wrong phones in the transcription for sample {}".format(sample[0])
ftlist.write("{}\n".format(sample[4]))
ftwlist.write("{}\n".format(sample[3]))
print("Done!", flush=True)
| 6,512 | 34.205405 | 90 |
py
|
wav2letter
|
wav2letter-main/data/ami/utils.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import os
import random
from collections import namedtuple
import sox
Speaker = namedtuple("Speaker", ["id", "gender"])
FileRecord = namedtuple("FileRecord", ["fid", "length", "speaker"])
def split_audio(line):
apath, meetid, hset, spk, start, end, transcript = line.strip().split(" ", 6)
key = "_".join([meetid, hset, spk, start, end])
os.makedirs(os.path.join(apath, "segments", meetid), exist_ok=True)
idx = hset[-1]
fn = f"{meetid}.Headset-{idx}.wav"
infile = os.path.join(apath, meetid, fn)
assert os.path.exists(infile), f"{infile} doesn't exist"
new_path = os.path.join(apath, "segments", meetid, key + ".flac")
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(
file_type="flac", encoding="signed-integer", bits=16, rate=16000
)
start = float(start)
end = float(end)
sox_tfm.trim(start, end)
sox_tfm.build(infile, new_path)
sx_dur = sox.file_info.duration(new_path)
if sx_dur is not None and abs(sx_dur - end + start) < 0.5:
return [meetid, key, new_path, str(round(sx_dur * 1000, 2)), transcript.lower()]
def do_split(all_records, spkrs, total_seconds, handles_chosen=None):
"""
Greedily selecting speakers, provided we don't go over budget
"""
time_taken = 0.0
records_filtered = []
idx = 0
speakers = copy.deepcopy(spkrs)
current_speaker_time = {spk: 0 for spk in speakers}
current_speaker_idx = {spk: 0 for spk in speakers}
while True:
if len(speakers) == 0:
break
speaker = speakers[idx % len(speakers)]
idx += 1
tocontinue = False
while True:
cur_spk_idx = current_speaker_idx[speaker]
if cur_spk_idx == len(all_records[speaker]):
speakers.remove(speaker)
tocontinue = True
break
cur_record = all_records[speaker][cur_spk_idx]
current_speaker_idx[speaker] += 1
if handles_chosen is None or cur_record.fid not in handles_chosen:
break
if tocontinue:
continue
records_filtered.append(cur_record)
time_taken += cur_record.length
current_speaker_time[speaker] += cur_record.length
if abs(time_taken - total_seconds) < 10:
break
return records_filtered, time_taken
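# Usage note: do_split takes per-speaker record lists, a speaker pool, and a
# time budget in seconds; it round-robins over the speakers, skipping fids
# already present in handles_chosen, and stops once the selected duration is
# within 10 seconds of the budget (see the limited-supervision splits below).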
def get_speakers(train_file):
cache = {}
all_speakers = []
with open(train_file) as f:
for line in f:
spl = line.split()
speaker_id = spl[0].split("_")[2]
gender = speaker_id[0]
if gender not in ["M", "F"]:
continue
if speaker_id not in cache:
cache[speaker_id] = 1
speaker = Speaker(id=speaker_id, gender=gender)
all_speakers.append(speaker)
return all_speakers
def get_fid2length(train_file):
fids = []
lengths = []
with open(train_file) as f:
for line in f:
spl = line.split()
fids.append(spl[0])
lengths.append(float(spl[2]) / 1000)
return list(zip(fids, lengths))
def full_records(speakers, fid2length, subset_name=None):
all_records = []
    # map speaker id -> Speaker so records can be looked up by id below
    speakers = {speaker.id: speaker for speaker in speakers}
for fid, length in fid2length:
speaker = fid.split("_")[2]
assert speaker in speakers, f"Unknown speaker! {speaker}"
speaker = speakers[speaker]
if subset_name is not None:
assert subset_name == speaker.subset
frecord = FileRecord(speaker=speaker, length=length, fid=fid)
all_records.append(frecord)
return all_records
def get_speaker2time(records, lambda_key, lambda_value):
from collections import defaultdict
key_value = defaultdict(int)
for record in records:
key = lambda_key(record)
value = lambda_value(record)
key_value[key] += value
return key_value
def create_limited_sup(list_dir):
random.seed(0)
train_file = os.path.join(list_dir, "train.lst")
assert os.path.exists(train_file)
speakers = get_speakers(train_file)
print("Found speakers", len(speakers))
write_records = {}
chosen_records = {}
fid2length = get_fid2length(train_file)
all_records = full_records(speakers, fid2length)
for gender in ["M", "F"]:
print(f"Selecting from gender {gender}")
records = [rec for rec in all_records if rec.speaker.gender == gender]
speaker2time = get_speaker2time(
records, lambda_key=lambda r: r.speaker.id, lambda_value=lambda r: r.length
)
        # keep speakers with at least 15 minutes of audio,
        # then pick 15 of them at random
        min_minutes_per_speaker = 15
speakers_10hr = {
r.speaker.id
for r in records
if speaker2time[r.speaker.id] >= min_minutes_per_speaker * 60
}
speakers_10hr = sorted(speakers_10hr)
random.shuffle(speakers_10hr)
speakers_10hr = speakers_10hr[:15]
print(f"Selected speakers from gender {gender} ", speakers_10hr)
cur_records = {}
for speaker in speakers_10hr:
cur_records[speaker] = [r for r in records if r.speaker.id == speaker]
random.shuffle(cur_records[speaker])
# 1 hr as 6 x 10min splits
key = "10min_" + gender
write_records[key] = {}
for i in range(6):
            speakers_10min = random.sample(speakers_10hr, 3)
write_records[key][i], _ = do_split(
cur_records, speakers_10min, 10 * 60 / 2, chosen_records
)
for kk in write_records[key][i]:
chosen_records[kk.fid] = 1
# 9 hr
key = "9hr_" + gender
write_records[key], _ = do_split(
cur_records, speakers_10hr, (9 * 60 * 60) / 2, chosen_records
)
train_lines = {}
with open(train_file) as f:
for line in f:
train_lines[line.split()[0]] = line.strip()
print("Writing 6 x 10min list files...")
for i in range(6):
with open(os.path.join(list_dir, f"train_10min_{i}.lst"), "w") as fo:
for record in write_records["10min_M"][i] + write_records["10min_F"][i]:
fo.write(train_lines[record.fid])
print("Writing 9hr list file...")
with open(os.path.join(list_dir, "train_9hr.lst"), "w") as fo:
for record in write_records["9hr_M"] + write_records["9hr_F"]:
fo.write(train_lines[record.fid])
| 6,772 | 31.099526 | 88 |
py
|
wav2letter
|
wav2letter-main/data/ami/prepare.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to package original AMI dataset into a form readable in
wav2letter++ pipelines
Command : python3 prepare.py --dst [...]
Replace [...] with appropriate path
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
from multiprocessing import Pool
from tqdm import tqdm
from utils import split_audio, create_limited_sup
LOG_STR = " To regenerate this file, please, remove it."
MIN_DURATION_MSEC = 50 # 50 msec
MAX_DURATION_MSEC = 30000 # 30 sec
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="AMI Dataset creation.")
parser.add_argument(
"--dst",
help="destination directory where to store data",
default="./ami",
)
parser.add_argument(
"-p",
"--process",
help="number of process for multiprocessing",
default=8,
type=int,
)
args = parser.parse_args()
splits = {"train": [], "dev": [], "test": []}
audio_path = os.path.join(args.dst, "audio")
text_path = os.path.join(args.dst, "text")
lists_path = os.path.join(args.dst, "lists")
os.makedirs(audio_path, exist_ok=True)
os.makedirs(text_path, exist_ok=True)
os.makedirs(lists_path, exist_ok=True)
audio_http = "http://groups.inf.ed.ac.uk/ami"
# Download the audio data
print("Downloading the AMI audio data...", flush=True)
cmds = []
for split in splits.keys():
with open(os.path.join("splits", f"split_{split}.orig")) as f:
for line in f:
line = line.strip()
splits[split].append(line)
cur_audio_path = os.path.join(audio_path, line)
os.makedirs(cur_audio_path, exist_ok=True)
num_meetings = 5 if line in ["EN2001a", "EN2001d", "EN2001e"] else 4
for meetid in range(num_meetings):
cmds.append(
f"wget -nv --continue -o /dev/null -P {cur_audio_path} {audio_http}/AMICorpusMirror/amicorpus/{line}/audio/{line}.Headset-{meetid}.wav"
)
for i in tqdm(range(len(cmds))):
os.system(cmds[i])
print("Downloading the text data ...", flush=True)
annotver = "ami_public_manual_1.6.1.zip"
cmd = f"wget -nv --continue -o /dev/null -P {text_path} {audio_http}/AMICorpusAnnotations/{annotver};"
cmd = cmd + f"mkdir -p {text_path}/annotations;"
cmd = cmd + f"unzip -q -o -d {text_path}/annotations {text_path}/{annotver} ;"
os.system(cmd)
print("Parsing the transcripts ...", flush=True)
cmd = f"sh ami_xml2text.sh {text_path};"
os.system(cmd)
cmd = f"perl ami_split_segments.pl {text_path}/annotations/transcripts1 {text_path}/annotations/transcripts2 2>&1 > {text_path}/annotations/split_segments.log"
os.system(cmd)
# Prepare the audio data
print("Segmenting audio files...", flush=True)
with open(f"{text_path}/annotations/transcripts2") as f:
lines = f.readlines()
lines = [audio_path + " " + line for line in lines]
os.makedirs(os.path.join(audio_path, "segments"), exist_ok=True)
with Pool(args.process) as p:
samples = list(
tqdm(
p.imap(split_audio, lines),
total=len(lines),
)
)
samples = [s for s in samples if s is not None] # filter None values
print("Wrote {} audio segment samples".format(len(samples)))
print("Writing to list files...", flush=True)
for split, meetings in splits.items():
cur_samples = [s for s in samples if s[0] in meetings]
with open(os.path.join(lists_path, f"{split}.lst"), "w") as fout:
for sample in cur_samples:
if (
float(sample[3]) > MIN_DURATION_MSEC
and float(sample[3]) < MAX_DURATION_MSEC
):
fout.write("\t".join(sample[1:]) + "\n")
print("Preparing limited supervision subsets", flush=True)
create_limited_sup(lists_path)
print("Done!", flush=True)
| 4,255 | 34.173554 | 163 |
py
|
wav2letter
|
wav2letter-main/data/wsj/utils.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import sox
def preprocess_word(word):
word = re.sub(r"^~$", "", word)
word = re.sub(r"^~~$", "", word)
word = re.sub(r"\\", "", word)
word = re.sub(r"^\[<\S+\]$", "", word)
word = re.sub(r"^\[\S+>\]$", "", word)
word = re.sub(r"^\[\S+/\]$", "", word)
word = re.sub(r"^\[/\S+\]$", "", word)
word = re.sub(r"^\[\S+\]$", "", word) # NOISE
if re.match(r"^<\S+>$", word) and word != "<NOISE>":
word = word[1:-1]
word = word.replace("*", "") if re.match(r"^\*\S+\*", word) else word
word = re.sub(r"^%PERCENT$", "PERCENT", word)
word = re.sub(r"^\.POINT$", "POINT", word)
word = re.sub(r"`", "'", word) # typo
word = re.sub(r"^\(IN\-PARENTHESIS$", "(IN-PARENTHESES", word) # mispell
word = re.sub(r"^Corp;$", "Corp", word) # mispell
word = re.sub(r"^\-\-DASH$", "-DASH", word) # mispell
if word != ":COLON":
word = word.replace(":", "") # some emphasis stuff
if word != "!EXCLAMATION-POINT":
word = word.replace("!", "") # some emphasis stuff
word = re.sub(r"^\.$", "", word)
word = word.lower()
return word
def find_transcripts(dst_paths):
transcripts = dict()
for ds_path in dst_paths:
for dirpath, _, filenames in os.walk(ds_path):
for filename in filenames:
if not filename.endswith(".dot"):
continue
full_path = os.path.join(dirpath, filename)
subset = full_path.split(os.sep)[-3]
assert subset, "Subset is empty"
transcripts.setdefault(subset, dict())
with open(full_path, "r") as f:
for line in f:
transcript, file_id = line.strip().rsplit(" ", 1)
file_id = file_id.strip("()")
if not transcript or not file_id:
continue
if subset in transcripts and file_id in transcripts[subset]:
assert (
transcripts[subset][file_id] == transcript
), "different transcriptions available for {i}".format(
i=file_id
)
transcripts[subset][file_id] = transcript
return transcripts
def ndx_to_samples(prefix, filename, transcripts, transform=None, sep="-"):
samples_list = []
with open(os.path.join(prefix, filename), "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith(";"):
continue
if transform is not None:
line = transform(line)
if line is None:
continue
pre, suf = line.split(":")
p1, p2, p3 = pre.split("_")
suf = suf.lstrip(" /")
ds, subset, _, sample_id = suf.replace(".wv1", "").rsplit("/", 3)
fname = os.path.join(prefix, "{}{}{}.{}".format(p1, sep, p2, p3), suf)
assert os.path.exists(fname), "Audio file {} doesn't exist".format(fname)
assert (
subset in transcripts
), "Subset {} is absent in the transcription".format(subset)
assert (
sample_id in transcripts[subset]
), "Id {} is absent in the subset {} of transcription for file {}".format(
sample_id, subset, fname
)
samples_list.append(
{
"id": sample_id,
"filename": fname,
"subset": subset,
"transcript": transcripts[subset][sample_id],
"basename": os.path.join("{}{}{}.{}".format(p1, sep, p2, p3), suf),
}
)
samples_list.sort(key=lambda x: x["id"])
return samples_list
def convert_to_flac(sample_data):
sample, idx, dst, sph2pipe = sample_data
filename = sample["filename"]
out_prefix = os.path.join(dst, "%09d" % idx)
# flac
if not os.path.exists(out_prefix + ".flac"):
tmp_file = os.path.join(dst, "{pid}_tmp.wav".format(pid=os.getpid()))
os.system("{sph} -f wav {i} {o}".format(sph=sph2pipe, i=filename, o=tmp_file))
assert (
sox.file_info.duration(tmp_file) > 0
), "Audio file {} duration is zero.".format(filename)
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(file_type="flac", encoding="signed-integer", bits=16)
sox_tfm.build(tmp_file, out_prefix + ".flac")
os.remove(tmp_file)
    duration = sox.file_info.duration(out_prefix + ".flac") * 1000  # milliseconds
transcript = " ".join(
[preprocess_word(word) for word in sample["transcript"].split()]
)
transcript = re.sub(" +", " ", transcript).strip()
return [sample["basename"], out_prefix + ".flac", str(duration), transcript]
| 5,257 | 36.557143 | 87 |
py
|
wav2letter
|
wav2letter-main/data/wsj/prepare.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to package original WSJ datasets into a form readable in wav2letter++
pipelines
Please install `sph2pipe` on your own -
see https://www.ldc.upenn.edu/language-resources/tools/sphere-conversion-tools \
with commands :
wget https://www.ldc.upenn.edu/sites/www.ldc.upenn.edu/files/ctools/sph2pipe_v2.5.tar.gz
tar -xzf sph2pipe_v2.5.tar.gz && cd sph2pipe_v2.5
gcc -o sph2pipe *.c -lm
Command : python3 prepare.py --wsj0 [...]/WSJ0/media \
--wsj1 [...]/WSJ1/media --dst [...] --sph2pipe [...]/sph2pipe_v2.5/sph2pipe
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import re
import subprocess
from multiprocessing import Pool
import numpy
from tqdm import tqdm
from utils import convert_to_flac, find_transcripts, ndx_to_samples, preprocess_word
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="WSJ Dataset creation.")
parser.add_argument("--wsj0", help="top level directory containing all WSJ0 discs")
parser.add_argument("--wsj1", help="top level directory containing all WSJ1 discs")
parser.add_argument("--dst", help="destination directory", default="./wsj")
parser.add_argument(
"--wsj1_type",
help="if you are using larger corpus LDC94S13A, set parameter to `LDC94S13A`",
default="LDC94S13B",
)
parser.add_argument(
"--sph2pipe",
help="path to sph2pipe executable",
default="./sph2pipe_v2.5/sph2pipe",
)
parser.add_argument(
"-p", "--process", help="# of process for Multiprocessing", default=8, type=int
)
args = parser.parse_args()
wsj1_sep = "-" if args.wsj1_type == "LDC94S13A" else "_"
assert os.path.isdir(str(args.wsj0)), "WSJ0 directory is not found - '{d}'".format(
d=args.wsj0
)
assert os.path.isdir(str(args.wsj1)), "WSJ1 directory is not found - '{d}'".format(
d=args.wsj1
)
assert args.wsj0 != args.wsj1, "WSJ0 and WSJ1 directories can't be the same"
assert os.path.exists(args.sph2pipe), "sph2pipe not found '{d}'".format(
d=args.sph2pipe
)
# Prepare audio data
transcripts = find_transcripts([args.wsj0, args.wsj1])
subsets = dict()
subsets["si84"] = ndx_to_samples(
args.wsj0,
"11-13.1/wsj0/doc/indices/train/tr_s_wv1.ndx",
transcripts,
lambda line: None if "11_2_1:wsj0/si_tr_s/401" in line else line,
)
assert len(subsets["si84"]) == 7138, "Incorrect number of samples in si84 part:"
" should be 7138, but fould #{}.".format(len(subsets["si84"]))
subsets["si284"] = ndx_to_samples(
args.wsj0,
"11-13.1/wsj0/doc/indices/train/tr_s_wv1.ndx",
transcripts,
lambda line: None if "11_2_1:wsj0/si_tr_s/401" in line else line,
)
subsets["si284"] = subsets["si284"] + ndx_to_samples(
args.wsj1,
"13{}34.1/wsj1/doc/indices/si_tr_s.ndx".format(wsj1_sep),
transcripts,
None,
wsj1_sep,
)
assert len(subsets["si284"]) == 37416, "Incorrect number of samples in si284 part: "
"should be 37416, but fould {}.".format(len(subsets["si284"]))
subsets["nov92"] = ndx_to_samples(
args.wsj0,
"11-13.1/wsj0/doc/indices/test/nvp/si_et_20.ndx",
transcripts,
lambda line: line + ".wv1",
)
    assert (
        len(subsets["nov92"]) == 333
    ), "Incorrect number of samples in nov92 part: should be 333, but found {}.".format(
        len(subsets["nov92"])
    )
subsets["nov92_5k"] = ndx_to_samples(
args.wsj0,
"11-13.1/wsj0/doc/indices/test/nvp/si_et_05.ndx",
transcripts,
lambda line: line + ".wv1",
)
    assert (
        len(subsets["nov92_5k"]) == 330
    ), "Incorrect number of samples in nov92_5k part: should be 330, but found {}.".format(
        len(subsets["nov92_5k"])
    )
subsets["nov93"] = ndx_to_samples(
args.wsj1,
"13{}32.1/wsj1/doc/indices/wsj1/eval/h1_p0.ndx".format(wsj1_sep),
transcripts,
lambda line: line.replace("13_32_1", "13_33_1"),
wsj1_sep,
)
    assert (
        len(subsets["nov93"]) == 213
    ), "Incorrect number of samples in nov93 part: should be 213, but found {}.".format(
        len(subsets["nov93"])
    )
subsets["nov93_5k"] = ndx_to_samples(
args.wsj1,
"13{}32.1/wsj1/doc/indices/wsj1/eval/h2_p0.ndx".format(wsj1_sep),
transcripts,
lambda line: line.replace("13_32_1", "13_33_1"),
wsj1_sep,
)
    assert (
        len(subsets["nov93_5k"]) == 215
    ), "Incorrect number of samples in nov93_5k part: should be 215, but found {}.".format(
        len(subsets["nov93_5k"])
    )
subsets["nov93dev"] = ndx_to_samples(
args.wsj1,
"13{}34.1/wsj1/doc/indices/h1_p0.ndx".format(wsj1_sep),
transcripts,
None,
wsj1_sep,
)
    assert (
        len(subsets["nov93dev"]) == 503
    ), "Incorrect number of samples in nov93dev part: should be 503, but found {}.".format(
        len(subsets["nov93dev"])
    )
subsets["nov93dev_5k"] = ndx_to_samples(
args.wsj1,
"13{}34.1/wsj1/doc/indices/h2_p0.ndx".format(wsj1_sep),
transcripts,
None,
wsj1_sep,
)
    assert (
        len(subsets["nov93dev_5k"]) == 513
    ), "Incorrect number of samples in nov93dev_5k part: should be 513, but found {}.".format(
        len(subsets["nov93dev_5k"])
    )
audio_path = os.path.join(args.dst, "audio")
text_path = os.path.join(args.dst, "text")
lists_path = os.path.join(args.dst, "lists")
os.makedirs(audio_path, exist_ok=True)
os.makedirs(text_path, exist_ok=True)
os.makedirs(lists_path, exist_ok=True)
transcription_words = set()
for set_name, samples in subsets.items():
n_samples = len(samples)
print(
"Writing {s} with {n} samples\n".format(s=set_name, n=n_samples), flush=True
)
data_dst = os.path.join(audio_path, set_name)
if os.path.exists(data_dst):
print(
"""The folder {} exists, existing flac for this folder will be skipped for generation.
Please remove the folder if you want to regenerate the data""".format(
data_dst
),
flush=True,
)
with Pool(args.process) as p:
os.makedirs(data_dst, exist_ok=True)
samples_info = list(
tqdm(
p.imap(
convert_to_flac,
zip(
samples,
numpy.arange(n_samples),
[data_dst] * n_samples,
[args.sph2pipe] * n_samples,
),
),
total=n_samples,
)
)
list_dst = os.path.join(lists_path, set_name + ".lst")
if not os.path.exists(list_dst):
with open(list_dst, "w") as f_list:
for sample_info in samples_info:
f_list.write(" ".join(sample_info) + "\n")
else:
print(
"List {} already exists, skip its generation."
" Please remove it if you want to regenerate the list".format(
list_dst
),
flush=True,
)
for sample_info in samples_info:
transcription_words.update(sample_info[3].lower().split(" "))
# Prepare text data
text_dst = os.path.join(text_path, set_name + ".txt")
if not os.path.exists(text_dst):
with open(text_dst, "w") as f_text:
for sample_info in samples_info:
f_text.write(sample_info[3] + "\n")
else:
print(
"Transcript text file {} already exists, skip its generation."
" Please remove it if you want to regenerate the list".format(text_dst),
flush=True,
)
# Prepare text data (for language model)
lm_paths = [
"13{}32.1/wsj1/doc/lng_modl/lm_train/np_data/87".format(wsj1_sep),
"13{}32.1/wsj1/doc/lng_modl/lm_train/np_data/88".format(wsj1_sep),
"13{}32.1/wsj1/doc/lng_modl/lm_train/np_data/89".format(wsj1_sep),
]
if not os.path.exists(os.path.join(text_path, "cmudict.0.7a")):
url = "http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict.0.7a"
cmd = "cd {} && wget {}".format(text_path, url)
os.system(cmd)
else:
print("CMU dict already exists, skip its downloading", flush=True)
allowed_words = []
with open(os.path.join(text_path, "cmudict.0.7a"), "r") as f_cmu:
for line in f_cmu:
line = line.strip()
if line.startswith(";;;"):
continue
allowed_words.append(line.split(" ")[0].lower())
lm_file = os.path.join(text_path, "lm.txt")
# define valid words for correct splitting into sentences with "."
existed_words = set.union(set(allowed_words), transcription_words)
existed_words = existed_words - {"prof."} # for reproducibility from lua code
if os.path.exists(lm_file):
print(
"LM data already exist, skip its generation."
" Please remove the file {} to regenerate it".format(lm_file),
flush=True,
)
else:
with open(lm_file, "w") as f_lm:
for path in lm_paths:
path = os.path.join(args.wsj1, path)
for filename in os.listdir(path):
if not filename.endswith(".z"):
continue
# Get text from zip files
filename = os.path.join(path, filename)
process = subprocess.Popen(
["zcat", filename], stdout=subprocess.PIPE
)
out, _ = process.communicate()
assert process.returncode == 0, "Error during zcat"
text_data = out.decode("utf-8")
text_data = text_data.lower()
                    # split running text into sentences: a word marks a
                    # sentence boundary if its only dot is at the end and
                    # the word is absent from the known-words set
text_data = " ".join(
[
word[:-1] + "\n"
if len(word) > 2
and word[-1] == "."
and "." not in word[:-1]
and word not in existed_words
else word
for word in text_data.split()
]
)
text_data = re.sub("<s[^>]+>", "<s>", text_data)
text_data = re.sub("<s>", "{", text_data)
text_data = re.sub("</s>", "}", text_data)
part_data = re.finditer(
r"\{(.*?)\}", text_data, re.MULTILINE | re.DOTALL
) # take the internal of {...}
for lines in part_data:
lines = lines.group(1).strip()
lines = re.sub(" +", " ", lines)
for line in lines.split("\n"):
sentence = []
for raw_word in line.split(" "):
word = preprocess_word(raw_word)
if len(word) > 0:
sentence.append(word)
if len(sentence) > 0:
f_lm.write(" ".join(sentence) + "\n")
print("Done!", flush=True)
| 12,172 | 35.776435 | 102 |
py
|
wav2letter
|
wav2letter-main/data/mls/prepare.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to package original MLS dataset into a form readable in
wav2letter++ pipelines
Command : python3 prepare.py --indir [...] --outdir [...]
Replace [...] with appropriate path
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="MLS Dataset preparation.")
parser.add_argument(
"--indir",
help="input directory of downloaded MLS dataset of a given language",
)
parser.add_argument(
"--outdir",
help="destination directory where to store data",
)
args = parser.parse_args()
os.makedirs(args.outdir, exist_ok=True)
lists_path = os.path.join(args.outdir, "lists")
os.makedirs(lists_path, exist_ok=True)
# Preparing the list file
for split in ["train", "dev", "test"]:
audio_path = os.path.join(args.indir, split, "audio")
segments_path = os.path.join(args.indir, split, "segments.txt")
transcripts_path = os.path.join(args.indir, split, "transcripts.txt")
list_out_path = os.path.join(lists_path, f"{split}.lst")
# read the segments file for audio durations
durations = {}
with open(segments_path) as f:
for line in f:
cols = line.split()
duration_ms = (float(cols[3]) - float(cols[2])) * 1000
durations[cols[0]] = "{:.2f}".format(duration_ms)
with open(list_out_path, 'w') as fo:
with open(transcripts_path) as fi:
for line in fi:
handle, transcript = line.split("\t")
speaker, book, idx = handle.split("_")
audio_file = os.path.join(audio_path, speaker, book, f"{handle}.flac")
assert os.path.exists(audio_file)
fo.write(handle + "\t" + audio_file + "\t" + durations[handle] + "\t" + transcript)
print("Done!", flush=True)
| 2,216 | 32.590909 | 103 |
py
|
wav2letter
|
wav2letter-main/data/utils/kaldi_to_listfile.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to package kaldi data directory into a form readable in
wav2letter++ pipelines
Command : python3 prepare.py --src [...] --dst [...]
Replace [...] with appropriate path
`src` directory is the path to kaldi data directory typically
prepared with `prepare_data.sh` script.
`dst` directory is the path to store (segmented) audio files and the
list file that is used by wav2letter++ pipelines to load data.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import argparse
import os
import re
from multiprocessing import Pool
import sox
from tqdm import tqdm
def run_segment(item):
uid, val = item
infile, start_sec, end_sec, outfile = val
sox_tfm = sox.Transformer()
sox_tfm.set_output_format(
file_type="flac", encoding="signed-integer", bits=16
)
sox_tfm.trim(start_sec, end_sec)
sox_tfm.build(infile, outfile)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Script to generate list file from Kaldi data dir"
)
parser.add_argument(
"--src",
help="input kaldi data directory. Must contain "
"'text', 'segments' and 'wav.scp' files",
)
parser.add_argument(
"--dst", help="destination directory where to store data",
)
parser.add_argument(
"--name", help="name of the output list file", default="data.lst"
)
parser.add_argument(
"-p",
"--process",
help="number of process for multiprocessing",
default=8,
type=int,
)
args = parser.parse_args()
wav_files = {}
cache = {}
cmds = []
with open(f"{args.src}/wav.scp") as f:
for line in f:
# handles two possible cases
# Case 1: ID followed by wav file
# Ex: S03_U01.CH1 /path/S03_U01.CH1.wav
# Case 2: ID followed by sox script
# Ex: P09_S03.L sox /path/S03_P09.wav -t wav - remix 1 |
wid, wav_handle = line.strip().split(" ", 1)
if wav_handle in cache:
wav_file = cache[wav_handle]
            elif wav_handle.startswith("sox"):
                hsh = re.sub("[^0-9a-zA-Z]+", "", wav_handle)
                wav_file = "/tmp/{}.wav".format(hsh)
                cmds.append(
                    wav_handle.replace(" - ", " " + wav_file + " ").replace(
                        "|", ""
                    )
                )
                # cache the converted file so identical sox pipelines run only once
                cache[wav_handle] = wav_file
else:
wav_file = wav_handle
wav_files[wid] = wav_file
print("Found {} wav files".format(len(wav_files)))
print("Running {} wav commands ...".format(len(cmds)))
def run_command(cmd):
os.system(cmd)
p = Pool(args.process)
list(tqdm(p.imap(run_command, cmds), total=len(cmds),))
transcripts = {}
with open(f"{args.src}/text") as f:
for line in f:
line_split = line.strip().split()
transcripts[line_split[0]] = " ".join(line_split[1:])
print("Found {} transcripts".format(len(transcripts)))
segments = {}
with open(f"{args.src}/segments") as f:
for line in f:
uid, wid, start_sec, end_sec = line.strip().split(" ", 3)
start_sec = float(start_sec)
end_sec = float(end_sec)
outfile = f"{args.dst}/audio/{uid}.flac"
segments[uid] = (wav_files[wid], start_sec, end_sec, outfile)
print("Found {} segments".format(len(segments)))
os.makedirs(f"{args.dst}", exist_ok=True)
os.makedirs(f"{args.dst}/audio", exist_ok=True)
print("Creating segmented audio files ...")
list(tqdm(p.imap(run_segment, segments.items()), total=len(segments),))
print("Writing to list file ...")
with open(f"{args.dst}/{args.name}", "w") as fo:
for uid, val in segments.items():
_, start_sec, end_sec, outfile = val
duration = "{:.2f}".format((end_sec - start_sec) * 1000)
fo.write(
"\t".join([uid, outfile, duration, transcripts[uid]]) + "\n"
)
print("Done!")
| 4,296 | 29.692857 | 76 |
py
|
wav2letter
|
wav2letter-main/data/librispeech/utils.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sox
def find_transcript_files(dir):
files = []
for dirpath, _, filenames in os.walk(dir):
for filename in filenames:
if filename.endswith(".trans.txt"):
files.append(os.path.join(dirpath, filename))
return files
def parse_speakers_gender(spk_file):
ret = {}
with open(spk_file, "r") as f:
for line in f:
if line.startswith(";"):
continue
sample_id, gen, _ = line.split("|", 2)
ret[sample_id.strip()] = gen.strip()
return ret
def transcript_to_list(file):
audio_path = os.path.dirname(file)
ret = []
with open(file, "r") as f:
for line in f:
file_id, trans = line.strip().split(" ", 1)
audio_file = os.path.abspath(os.path.join(audio_path, file_id + ".flac"))
            duration = sox.file_info.duration(audio_file) * 1000  # milliseconds
ret.append([file_id, audio_file, str(duration), trans.lower()])
return ret
def read_list(src, files):
ret = []
for file in files:
with open(os.path.join(src, file + ".lst"), "r") as f:
for line in f:
_, _, _, trans = line.strip().split(" ", 3)
ret.append(trans)
return ret
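# Hedged usage sketch for the helpers above (paths are hypothetical and sox
# must be installed for the duration lookup):
#   files = find_transcript_files("/data/LibriSpeech/dev-clean")
#   rows = transcript_to_list(files[0])
#   # each row: [file_id, absolute_flac_path, duration_in_ms, lowercased_transcript]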
| 1,574 | 26.155172 | 85 |
py
|
wav2letter
|
wav2letter-main/data/librispeech/prepare.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Script to package original Librispeech datasets into a form readable in
wav2letter++ pipelines
Command : python3 prepare.py --dst [...]
Replace [...] with appropriate path
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
from multiprocessing import Pool
import numpy
from tqdm import tqdm
from utils import find_transcript_files, transcript_to_list
LOG_STR = " To regenerate this file, please, remove it."
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Librispeech Dataset creation.")
parser.add_argument(
"--dst",
help="destination directory where to store data",
default="./librispeech",
)
parser.add_argument(
"-p",
"--process",
help="number of process for multiprocessing",
default=8,
type=int,
)
args = parser.parse_args()
subpaths = {
"train": ["train-clean-100", "train-clean-360", "train-other-500"],
"dev": ["dev-clean", "dev-other"],
"test": ["test-clean", "test-other"],
}
subpath_names = numpy.concatenate(list(subpaths.values()))
audio_path = os.path.join(args.dst, "audio")
text_path = os.path.join(args.dst, "text")
lists_path = os.path.join(args.dst, "lists")
os.makedirs(audio_path, exist_ok=True)
os.makedirs(text_path, exist_ok=True)
os.makedirs(lists_path, exist_ok=True)
audio_http = "http://www.openslr.org/resources/12/"
text_http = "http://www.openslr.org/resources/11/librispeech-lm-norm.txt.gz"
# Download the audio data
print("Downloading the Librispeech data.", flush=True)
for pname in subpath_names:
if not os.path.exists(os.path.join(audio_path, "LibriSpeech", pname)):
print("Downloading and unpacking {}...".format(pname))
cmd = """wget -c {http}{name}.tar.gz -P {path};
yes n 2>/dev/null | gunzip {path}/{name}.tar.gz;
tar -C {path} -xf {path}/{name}.tar"""
os.system(cmd.format(path=audio_path, http=audio_http, name=pname))
else:
log_str = "{} part of data exists, skip its downloading and unpacking"
print(log_str.format(pname) + LOG_STR, flush=True)
# Downloading text data for language model training
if not os.path.exists(os.path.join(text_path, "librispeech-lm-norm.txt")):
print("Downloading and unpacking text data...")
cmd = """wget -c {http} -P {path}; yes n 2>/dev/null |
gunzip {path}/librispeech-lm-norm.txt.gz"""
os.system(cmd.format(http=text_http, path=text_path))
else:
print("Text data exists, skip its downloading." + LOG_STR, flush=True)
# Prepare the audio data
print("Converting audio data into necessary format.", flush=True)
word_dict = {}
for subpath_type in subpaths.keys():
word_dict[subpath_type] = set()
for subpath in subpaths[subpath_type]:
src = os.path.join(audio_path, "LibriSpeech", subpath)
assert os.path.exists(src), "Unable to find the directory - '{src}'".format(
src=src
)
dst_list = os.path.join(lists_path, subpath + ".lst")
if os.path.exists(dst_list):
print(
"Path {} exists, skip its generation.".format(dst_list) + LOG_STR,
flush=True,
)
continue
print("Analyzing {src}...".format(src=src), flush=True)
transcript_files = find_transcript_files(src)
transcript_files.sort()
print("Writing to {dst}...".format(dst=dst_list), flush=True)
with Pool(args.process) as p:
samples = list(
tqdm(
p.imap(transcript_to_list, transcript_files),
total=len(transcript_files),
)
)
with open(dst_list, "w") as fout:
for sp in samples:
for s in sp:
word_dict[subpath_type].update(s[-1].split(" "))
s[0] = subpath + "-" + s[0]
fout.write(" ".join(s) + "\n")
# Prepare text data
current_path = os.path.join(text_path, "librispeech-lm-norm.txt.lower.shuffle")
if not os.path.exists(current_path):
print("Prepare text data in the necessary format.", flush=True)
numpy.random.seed(42)
text_data = []
with open(os.path.join(text_path, "librispeech-lm-norm.txt"), "r") as f_text:
for line in f_text:
line = line.strip().lower()
if line != "":
text_data.append(line)
indices = numpy.random.permutation(numpy.arange(len(text_data)))
with open(
os.path.join(text_path, "librispeech-lm-norm.txt.lower.shuffle"), "w"
) as f:
for index in indices:
f.write(text_data[index] + "\n")
else:
print(
"Path {} exists, skip its generation.".format(current_path) + LOG_STR,
flush=True,
)
for pname in subpath_names:
current_path = os.path.join(text_path, pname + ".txt")
if not os.path.exists(current_path):
with open(os.path.join(lists_path, pname + ".lst"), "r") as flist, open(
os.path.join(text_path, pname + ".txt"), "w"
) as fout:
for line in flist:
fout.write(" ".join(line.strip().split(" ")[3:]) + "\n")
else:
print(
"Path {} exists, skip its generation.".format(current_path) + LOG_STR,
flush=True,
)
print("Done!", flush=True)
| 6,030 | 35.77439 | 88 |
py
|
lanro-gym
|
lanro-gym-main/main.py
|
import gymnasium as gym
import os
import numpy as np
import lanro_gym
import argparse
import glfw
DEBUG = int("DEBUG" in os.environ and os.environ["DEBUG"])
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--interactive', action='store_true', dest='interactive', help='Start interactive mode')
parser.add_argument('-t', '--test', action='store_true', dest='test', help='Start test mode')
parser.add_argument('-r', '--reward', action='store_true', dest='reward', help='Print the reward.')
parser.add_argument('-a', '--action', action='store_true', dest='action', help='Print the action.')
parser.add_argument('--full', action='store_true', dest='full', help='Print everything')
    parser.add_argument('--norender', action='store_false', dest='render', help='Deactivate rendering', default=True)
parser.add_argument('--keyboard',
action='store_true',
dest='keyboard_control',
help='Activates keyboard control for joints.')
parser.add_argument('--metrics', action='store_true', help='Option to print environment metrics.')
parser.add_argument('--action_type', type=str, default='absolute_joints', help='Action type to control the robot.')
parser.add_argument(
'-e',
'--env',
default='PandaNLReach2-v0',
help=
f"Available envs: {', '.join([envkey for envkey in gym.envs.registry.keys() if 'Panda' in envkey])}"
)
return parser.parse_args()
def log_step(env, action, args):
obs, reward, terminated, truncated, info = env.step(action)
if args.reward:
print(f"reward: {reward} success: {info['is_success']}")
if args.action:
print(action)
if args.full:
print(obs, reward, terminated, truncated, info)
if args.metrics:
print(env.get_metrics())
    if DEBUG and (info['is_success'] or 'hindsight_instruction' in info):
import ipdb
ipdb.set_trace()
return terminated or truncated or info['is_success']
def test(env, args):
for _ in range(100):
env.reset()
terminated = False
while not terminated:
action = env.action_space.sample()
terminated = log_step(env, action, args)
if args.render:
env.render(mode="human")
key_events = {
65297: "forward",
65298: "backward",
65295: "straight_left",
65296: "straight_right",
glfw.KEY_MINUS: "close_gripper",
glfw.KEY_5: "open_gripper",
43: "open_gripper",
glfw.KEY_8: "up",
glfw.KEY_2: "down",
glfw.KEY_1: "yaw_left",
glfw.KEY_3: "yaw_right",
glfw.KEY_6: "pitch_right",
glfw.KEY_4: "pitch_left",
glfw.KEY_7: "roll_left",
glfw.KEY_9: "roll_right",
}
def interactive(args):
env = gym.make(args.env, render=args.render, action_type=args.action_type)
# display GUI controls
if not args.keyboard_control:
import pybullet as p
env.env.sim.bclient.configureDebugVisualizer(p.COV_ENABLE_GUI, 1)
controls = env.robot.get_xyz_rpy_controls()
for _ in range(10):
env.reset()
terminated = False
action = np.zeros(shape=env.action_space.shape)
key_control_gain = 0.01
for idx, val in enumerate(env.robot.get_default_controls().values()):
if len(action) > idx:
action[idx] = val
while True:
if args.keyboard_control:
keys = env.getKeyboardEvents()
if keys:
key_str = ''.join(
[key_events[_pressed] for _pressed in keys.keys() if _pressed in key_events.keys()])
if "forward" in key_str:
action[3] += 1 * key_control_gain
if "backward" in key_str:
action[3] += -1 * key_control_gain
if "straight_left" in key_str:
action[0] += 1 * key_control_gain
if "straight_right" in key_str:
action[0] += -1 * key_control_gain
if "up" in key_str:
action[1] += -1 * key_control_gain
if "down" in key_str:
action[1] += 1 * key_control_gain
if not env.robot.fixed_gripper:
if "close_gripper" in key_str:
action[-1] += 1 * key_control_gain
if "open_gripper" in key_str:
action[-1] += -1 * key_control_gain
if env.action_space.shape[0] > 4:
if "roll_left" in key_str:
action[2] += 1 * key_control_gain
if "roll_right" in key_str:
action[2] += -1 * key_control_gain
if "pitch_left" in key_str:
action[4] += 1 * key_control_gain
if "pitch_right" in key_str:
action[4] += -1 * key_control_gain
if "yaw_left" in key_str:
action[5] += -1 * key_control_gain
if "yaw_right" in key_str:
action[5] += 1 * key_control_gain
else:
action = np.zeros(shape=env.action_space.shape)
for idx, ctrl_id in enumerate(controls):
try:
action[idx] = env.sim.bclient.readUserDebugParameter(ctrl_id)
except Exception as e:
print(e)
continue
terminated = log_step(env, np.array(action), args)
if args.render:
env.render(mode='human')
if args.metrics and terminated:
break
def main():
args = parse_args()
if args.test:
env = gym.make(args.env, render=args.render)
env.reset()
test(env, args)
env.close()
elif args.interactive:
interactive(args)
else:
raise ValueError("No valid mode found: use -t/--test (test mode) or -i/--interactive (interactive mode)")
if __name__ == '__main__':
main()
| 6,302 | 36.96988 | 119 |
py
|
lanro-gym
|
lanro-gym-main/setup.py
|
import os
from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
with open(os.path.join("lanro_gym", "VERSION"), "r") as f:
__version__ = f.read().strip()
setup(
name="lanro_gym",
description="Gymnasium multi-goal environments for goal-conditioned and language-conditioned deep reinforcement learning build with PyBullet",
author="Frank Röder",
author_email="[email protected]",
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/frankroeder/lanro-gym",
packages=[package for package in find_packages() if package.startswith("lanro_gym")],
package_data={ "lanro_gym": ["VERSION"] },
include_package_data=True,
version=__version__,
install_requires=["gymnasium~=0.26", "pybullet", "numpy"],
extras_require={
"dev": ["pytest", "yapf", "ipdb", "glfw"]
},
python_requires=">=3.7",
classifiers=[
"Operating System :: OS Independent", "License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10"
],
)
| 1,279 | 36.647059 | 146 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/utils.py
|
from enum import Enum
import itertools
from typing import List, Set, Tuple
import numpy as np
import random
def get_prop_combinations(stream) -> Set[Tuple]:
combinations = itertools.permutations(stream, 2)
comblist = [c for c in combinations]
def filter_same_prop_type(x) -> bool:
return type(x[0]) != type(x[1])
filtered_combinations = [e for e in filter(filter_same_prop_type, comblist)]
assert len(comblist) >= len(filtered_combinations)
return set(filtered_combinations)
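# Worked example: for stream = [RGBCOLORS.RED, RGBCOLORS.GREEN, SHAPES.CUBE],
# itertools.permutations yields 6 ordered pairs; filter_same_prop_type drops
# the two color-color pairs (RED, GREEN) and (GREEN, RED), leaving the four
# mixed color<->shape pairs.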
def expand_enums(concept_stream: List) -> List:
objects = []
for concept in concept_stream:
if isinstance(concept, tuple):
for concept_prod in itertools.product(*concept):
objects.append(concept_prod)
else:
for prop in concept:
objects.append(prop)
return objects
def get_random_enum_with_exceptions(enum_cls, exclude) -> Tuple[Enum, int]:
en_list = [e for e in enum_cls]
en = random.choice([_e for _e in en_list if _e not in exclude])
en_idx = en_list.index(en)
return en, en_idx
def scale_rgb(rgb_lst: List[float]) -> List[float]:
return [_color / 255.0 for _color in rgb_lst]
def get_one_hot_list(total_items: int) -> np.ndarray:
"""Create an array with `total_items` one-hot vectors."""
return np.eye(total_items)
def goal_distance(vec1: np.ndarray, vec2: np.ndarray) -> float:
assert vec1.shape == vec2.shape, "mismatch of vector shapes"
return np.linalg.norm(vec1 - vec2, axis=-1)
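# Small worked examples for the helpers above (values rounded for readability):
#   scale_rgb([255.0, 87.0, 89.0])          -> [1.0, 0.341, 0.349]
#   get_one_hot_list(3)                     -> 3x3 identity; row i is the one-hot for item i
#   goal_distance(np.zeros(3), np.ones(3))  -> sqrt(3) ~ 1.732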
def post_process_camera_pixel(px, _height: int, _width: int) -> np.ndarray:
rgb_array = np.array(px, dtype=np.uint8).reshape(_height, _width, 4)
return rgb_array[:, :, :3]
def gripper_camera(bullet_client, projectionMatrix, pos, orn, imgsize: int = 84, mode: str = 'ego') -> np.ndarray:
if mode == 'static':
pos = [0, 0, 0]
distance = 0.6
yaw = 90
pitch = -40
    elif mode == 'ego':
        distance = 0.5
        yaw = -45
        pitch = -30
    else:
        raise ValueError("Unknown camera mode: {}".format(mode))
viewMatrix = bullet_client.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=pos,
distance=distance,
yaw=yaw,
pitch=pitch,
roll=0,
upAxisIndex=2)
(_, _, px, _, _) = bullet_client.getCameraImage(imgsize,
imgsize,
viewMatrix,
projectionMatrix,
flags=bullet_client.ER_NO_SEGMENTATION_MASK,
shadow=0,
renderer=bullet_client.ER_BULLET_HARDWARE_OPENGL)
return post_process_camera_pixel(px, imgsize, imgsize)
def environment_camera(bullet_client, projectionMatrix, viewMatrix, width: int = 500, height: int = 500) -> np.ndarray:
(_, _, px, _, _) = bullet_client.getCameraImage(width,
height,
viewMatrix,
projectionMatrix,
flags=bullet_client.ER_NO_SEGMENTATION_MASK,
renderer=bullet_client.ER_BULLET_HARDWARE_OPENGL)
return post_process_camera_pixel(px, width, height)
| 3,707 | 39.304348 | 119 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/simulation.py
|
import os
from typing import Iterator, List, Optional, Tuple, Dict, Any
import pybullet as p
import pybullet_data as pd
from pybullet_utils import bullet_client
from contextlib import contextmanager
from lanro_gym.env_utils import RGBCOLORS
import time
from lanro_gym.utils import environment_camera
import numpy as np
import warnings
import pkgutil
import subprocess
import xml.etree.ElementTree as ET
DEBUG = int("DEBUG" in os.environ and os.environ["DEBUG"])
DEBUG_CAM = int("DEBUG_CAM" in os.environ and os.environ["DEBUG_CAM"])
PYB_GPU = int("PYB_GPU" in os.environ and os.environ["PYB_GPU"])
GRAVITY: float = -9.81
HZ: float = 500
class PyBulletSimulation:
def __init__(self, n_substeps: int = 20, render: bool = False):
background_color = np.array([109.0, 219.0, 145.0]) / 255
self.render_on = render
if render:
options = "--background_color_red={} \
--background_color_green={} \
--background_color_blue={}".format(*background_color)
self.bclient = bullet_client.BulletClient(connection_mode=p.GUI, options=options)
# Enable GUI with key "g"
self.bclient.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
# Enable shadows with key "s"
self.bclient.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0)
self.bclient.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, DEBUG)
if DEBUG_CAM:
self._setup_camera_controls()
if DEBUG:
warnings.warn(
"Using DEBUG and DEBUG_CAM at the same time might result in"
" undesired behaviors when debug user parameters overlap", UserWarning)
else:
self.bclient = bullet_client.BulletClient(connection_mode=p.DIRECT)
self.time_step: float = 1. / HZ
self.n_substeps = n_substeps
self.bclient.setTimeStep(self.time_step)
self.bclient.resetSimulation()
self.bclient.setGravity(0, 0, GRAVITY)
self.bclient.setAdditionalSearchPath(pd.getDataPath())
# GPU support for faster rendering of camera images
if PYB_GPU:
os.environ['MESA_GL_VERSION_OVERRIDE'] = '3.3'
os.environ['MESA_GLSL_VERSION_OVERRIDE'] = '330'
# Get EGL device
assert 'CUDA_VISIBLE_DEVICES' in os.environ
devices = os.environ.get('CUDA_VISIBLE_DEVICES', ).split(',')
            assert len(devices) == 1, "expected exactly one device in CUDA_VISIBLE_DEVICES"
out = subprocess.check_output(['nvidia-smi', '--id=' + str(devices[0]), '-q', '--xml-format'])
tree = ET.fromstring(out)
gpu = tree.findall('gpu')[0]
dev_id = gpu.find('minor_number').text
os.environ['EGL_VISIBLE_DEVICES'] = str(dev_id)
egl = pkgutil.get_loader('eglRenderer')
p.loadPlugin(egl.get_filename(), "_eglRendererPlugin")
self._bodies_idx: Dict[str, Any] = {}
@property
def dt(self) -> float:
"""the product of timeStep and n_substeps, dt, reflects how
much real time it takes to execute a robot action
dt = 1 / 500 * 20 = 0.04 seconds -> 25 Hz"""
return self.time_step * self.n_substeps
def get_object_id(self, body_name: str) -> int:
"""Get the id of the body.
Args:
body_name (str): The name of the body.
"""
return self._bodies_idx[body_name]
def step(self) -> None:
""" step the simulation forward for `num_steps` steps. """
if DEBUG_CAM:
self.read_camera_parameters()
for _ in range(self.n_substeps):
self.bclient.stepSimulation()
def close(self) -> None:
"""Close the simulation."""
self.bclient.disconnect()
def render(self, mode='human') -> Optional[np.ndarray]:
if mode == 'human':
self.bclient.configureDebugVisualizer(self.bclient.COV_ENABLE_SINGLE_STEP_RENDERING, 1)
time.sleep(self.dt)
if mode == 'rgb_array':
viewMatrix = self.bclient.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[-0.4, -0.25, 0.1],
distance=1.2,
yaw=-30,
pitch=-30,
roll=0,
upAxisIndex=2)
projectionMatrix = self.bclient.computeProjectionMatrixFOV(fov=60, aspect=1, nearVal=0.1, farVal=100)
return environment_camera(self.bclient, projectionMatrix, viewMatrix)
def _setup_camera_controls(self):
self._cam_controls = [
self.bclient.addUserDebugParameter("Distance", -15, 15, 1.2),
self.bclient.addUserDebugParameter("Yaw", -360, 360, 70),
self.bclient.addUserDebugParameter("Pitch", -360, 360, -50),
self.bclient.addUserDebugParameter("X", -10, 10, 0),
self.bclient.addUserDebugParameter("Y", -10, 10, 0),
self.bclient.addUserDebugParameter("Z", -10, 10, 0)
]
def read_camera_parameters(self):
self.bclient.resetDebugVisualizerCamera(
self.bclient.readUserDebugParameter(self._cam_controls[0]),
self.bclient.readUserDebugParameter(self._cam_controls[1]),
self.bclient.readUserDebugParameter(self._cam_controls[2]),
[self.bclient.readUserDebugParameter(self._cam_controls[i]) for i in range(3, 6)])
@contextmanager
def no_rendering(self) -> Iterator[None]:
"""Disable rendering within this context."""
self.bclient.configureDebugVisualizer(self.bclient.COV_ENABLE_RENDERING, 0)
yield
self.bclient.configureDebugVisualizer(self.bclient.COV_ENABLE_RENDERING, 1)
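    # Hedged usage sketch: suppress GUI redraws while (re)building a scene.
    # `sim` is assumed to be a PyBulletSimulation created with render=True:
    #   with sim.no_rendering():
    #       sim.loadURDF(body_name="plane", fileName="plane.urdf")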
    def loadURDF(self, body_name: str, **kwargs) -> int:
"""Load URDF file.
Args:
body_name (str): The name of the body. Must be unique in the sim.
"""
self._bodies_idx[body_name] = self.bclient.loadURDF(**kwargs)
return self._bodies_idx[body_name]
    def loadSDF(self, body_name: str, **kwargs) -> int:
"""Load SDF file.
Args:
body_name (str): The name of the body. Must be unique in the sim.
"""
self._bodies_idx[body_name] = self.bclient.loadSDF(**kwargs)[0]
return self._bodies_idx[body_name]
def place_visualizer(self,
target: List = [-0.1, 0, -0.1],
distance: float = 1.1,
yaw: float = 45,
pitch: float = -30):
"""Orient the camera used for rendering.
Args:
target (x, y, z): Target position.
distance (float): Distance from the target position.
yaw (float): Yaw.
pitch (float): Pitch.
"""
self.bclient.resetDebugVisualizerCamera(
cameraDistance=distance,
cameraYaw=yaw,
cameraPitch=pitch,
cameraTargetPosition=target,
)
def set_lateral_friction(self, body: str, link: int, lateral_friction: float, **kwargs) -> None:
"""Set the lateral friction of a link.
Args:
body (str): Body unique name.
link (int): Link index in the body.
lateral_friction (float): Lateral friction.
"""
self.bclient.changeDynamics(bodyUniqueId=self._bodies_idx[body],
linkIndex=link,
lateralFriction=lateral_friction,
**kwargs)
def set_spinning_friction(self, body: str, link: int, spinning_friction: float, **kwargs):
"""Set the spinning friction of a link.
Args:
body (str): Body unique name.
link (int): Link index in the body.
spinning_friction (float): Spinning friction.
"""
self.bclient.changeDynamics(bodyUniqueId=self._bodies_idx[body],
linkIndex=link,
spinningFriction=spinning_friction,
**kwargs)
def get_quaternion_from_euler(self, euler_orn: List) -> List[float]:
""" Convert euler angles to quaternions."""
return self.bclient.getQuaternionFromEuler(euler_orn)
def get_euler_from_quaternion(self, quat: List) -> List[float]:
""" Convert quaternions to euler angles."""
return self.bclient.getEulerFromQuaternion(quat)
def get_base_position(self, body: str) -> List[float]:
"""Get the position of the body.
Args:
body (str): Body unique name.
Returns:
(x, y, z): The cartesian position.
"""
return self.bclient.getBasePositionAndOrientation(self._bodies_idx[body])[0]
def get_base_orientation(self, body: str) -> List[float]:
"""Get the orientation of the body.
Args:
body (str): Body unique name.
Returns:
(x, y, z, w): The orientation as quaternion.
"""
return self.bclient.getBasePositionAndOrientation(self._bodies_idx[body])[1]
def get_base_rotation(self, body: str) -> List[float]:
"""Get the rotation of the body.
Args:
body (str): Body unique name.
Returns:
(rx, ry, rz): The rotation.
"""
return self.bclient.getEulerFromQuaternion(self.get_base_orientation(body))
def get_base_velocity(self, body: str) -> List[float]:
"""Get the velocity of the body.
Args:
body (str): Body unique name.
Returns:
(vx, vy, vz): The cartesian velocity.
"""
return self.bclient.getBaseVelocity(self._bodies_idx[body])[0]
def get_base_angular_velocity(self, body: str) -> List[float]:
"""Get the angular velocity of the body.
Args:
body (str): Body unique name.
Returns:
(wx, wy, wz): The angular velocity.
"""
return self.bclient.getBaseVelocity(self._bodies_idx[body])[1]
def get_joint_angle(self, body: str, joint: int) -> float:
"""Get the angle of the joint of the body.
Args:
body (str): Body unique name.
joint (int): Joint index in the body
Returns:
float: The angle.
"""
return self.bclient.getJointState(self._bodies_idx[body], joint)[0]
def get_link_state(self, body: str, link: int) -> Tuple:
return self.bclient.getLinkState(self._bodies_idx[body], link, computeLinkVelocity=1)
def get_link_position(self, body: str, link: int) -> List:
"""Get the position of the link of the body.
Args:
body (str): Body unique name.
link (int): Link index in the body.
Returns:
(x, y, z): The cartesian position.
"""
return self.bclient.getLinkState(self._bodies_idx[body], link)[0]
def get_link_velocity(self, body: str, link: int) -> List:
"""Get the velocity of the link of the body.
Args:
body (str): Body unique name.
link (int): Link index in the body.
Returns:
(vx, vy, vz): The cartesian velocity.
"""
return self.bclient.getLinkState(self._bodies_idx[body], link, computeLinkVelocity=True)[6]
def get_num_joints(self, body: str) -> int:
return self.bclient.getNumJoints(self._bodies_idx[body])
def get_joint_info(self, body: str, link_idx: int):
return self.bclient.getJointInfo(self._bodies_idx[body], link_idx)
def control_joints(
self,
body: str,
joints: List,
target_angles: List,
forces: List,
) -> None:
"""Control the joints motor.
Args:
body (str): Body unique name.
joints (List[int]): List of joint indices.
target_angles (List[float]): List of target angles.
forces (List[float]): Forces to apply.
"""
self.bclient.setJointMotorControlArray(
self._bodies_idx[body],
jointIndices=joints,
controlMode=self.bclient.POSITION_CONTROL,
targetPositions=target_angles,
forces=forces,
)
def control_single_joint(self, body: str, joint: int, pos: float, force: float) -> None:
self.bclient.setJointMotorControl2(self._bodies_idx[body],
jointIndex=joint,
controlMode=self.bclient.POSITION_CONTROL,
targetPosition=pos,
force=force)
def set_joint_angles(self, body: str, joints: List, angles: List) -> None:
"""Set the angles of the joints of the body.
Args:
body (str): Body unique name.
joints (List[int]): List of joint indices.
angles (List[float]): List of target angles.
"""
for joint, angle in zip(joints, angles):
self.set_joint_angle(body, joint, angle)
def set_joint_angle(self, body: str, joint: int, angle: float):
"""Set the angle of the joint of the body.
Args:
body (str): Body unique name.
joint (int): Joint index in the body.
angle (float): Target angle.
"""
self.bclient.resetJointState(bodyUniqueId=self._bodies_idx[body], jointIndex=joint, targetValue=angle)
def set_base_pose(self, body: str, position: List, orientation: List) -> None:
"""Set the position of the body.
Args:
body (str): Body unique name.
position (x, y, z): The target cartesian position.
orientation (x, y, z, w): The target orientation as quaternion.
"""
self.bclient.resetBasePositionAndOrientation(bodyUniqueId=self._bodies_idx[body],
posObj=position,
ornObj=orientation)
def _create_geometry(self,
body_name: str,
geom_type: Any,
mass: float = 0,
position: List = [0, 0, 0],
ghost: bool = False,
lateral_friction: Optional[float] = None,
spinning_friction: Optional[float] = None,
visual_kwargs: Optional[Dict] = {},
collision_kwargs: Optional[Dict] = {},
multi_kwargs: Optional[Dict] = {}):
"""Create a geometry.
Args:
body_name (str): The name of the body. Must be unique in the sim.
geom_type (int): The geometry type. See self.bclient.GEOM_<shape>.
mass (float, optional): The mass in kg. Defaults to 0.
position (x, y, z): The position of the geom. Defaults to (0, 0, 0)
            ghost (bool, optional): If True, create the geometry without a
                collision shape. Defaults to False.
lateral_friction (float, optional): The friction coef.
spinning_friction (float, optional): The friction coef.
visual_kwargs (dict, optional): Visual kwargs. Defaults to {}.
collision_kwargs (dict, optional): Collision kwargs. Defaults to {}.
"""
baseVisualShapeIndex = self.bclient.createVisualShape(geom_type, **visual_kwargs)
if not ghost:
baseCollisionShapeIndex = self.bclient.createCollisionShape(geom_type, **collision_kwargs)
else:
baseCollisionShapeIndex = -1
self._bodies_idx[body_name] = self.bclient.createMultiBody(baseVisualShapeIndex=baseVisualShapeIndex,
baseCollisionShapeIndex=baseCollisionShapeIndex,
baseMass=mass,
basePosition=position,
**multi_kwargs)
if lateral_friction is not None:
self.set_lateral_friction(body=body_name, link=-1, lateral_friction=lateral_friction)
if spinning_friction is not None:
self.set_spinning_friction(body=body_name, link=-1, spinning_friction=spinning_friction)
def create_sphere(
self,
body_name: str,
radius: float,
mass: float,
position: List,
rgba_color: List,
specular_color: List = [0, 0, 0, 0],
ghost: Optional[bool] = False,
lateral_friction: Optional[float] = None,
spinning_friction: Optional[float] = None,
):
"""Create a sphere.
Args:
            body_name (str): The name of the sphere. Must be unique in the sim.
            radius (float): The radius in meters.
            mass (float): The mass in kg.
            position (x, y, z): The position of the sphere.
            specular_color (r, g, b): RGB specular color.
            rgba_color (r, g, b, a): RGBA color.
            ghost (bool, optional): If True, create without a collision shape. Defaults to False.
            lateral_friction (float, optional): The lateral friction. If None, keep the pybullet
                default value. Defaults to None.
            spinning_friction (float, optional): The spinning friction. If None, keep the pybullet
                default value. Defaults to None.
"""
visual_kwargs = {
"radius": radius,
"specularColor": specular_color,
"rgbaColor": rgba_color,
}
collision_kwargs = {"radius": radius}
self._create_geometry(
body_name,
geom_type=self.bclient.GEOM_SPHERE,
mass=mass,
position=position,
ghost=ghost,
lateral_friction=lateral_friction,
spinning_friction=spinning_friction,
visual_kwargs=visual_kwargs,
collision_kwargs=collision_kwargs,
)
def create_box(
self,
body_name: str,
half_extents: List,
mass: float,
position: List,
rgba_color: List,
specular_color: List = [0, 0, 0, 0],
ghost: bool = False,
lateral_friction: float = 1.0,
spinning_friction: float = 0.005,
vis_kwargs: Dict = {},
coll_kwargs: Dict = {},
):
"""Create a box.
Args:
body_name (str): The name of the box. Must be unique in the sim.
half_extents (x, y, z): Half size of the box in meters.
mass (float): The mass in kg.
position (x, y, z): The position of the box.
specular_color (r, g, b): RGB specular color.
rgba_color (r, g, b, a): RGBA color.
            ghost (bool, optional): If True, create without a collision shape. Defaults to False.
"""
visual_kwargs = {
"halfExtents": half_extents,
"specularColor": specular_color,
"rgbaColor": rgba_color,
**vis_kwargs
}
collision_kwargs = {"halfExtents": half_extents, **coll_kwargs}
return self._create_geometry(
body_name,
geom_type=self.bclient.GEOM_BOX,
mass=mass,
position=position,
ghost=ghost,
lateral_friction=lateral_friction,
spinning_friction=spinning_friction,
visual_kwargs=visual_kwargs,
collision_kwargs=collision_kwargs,
)
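    # Hedged usage sketch for create_box (names and values are illustrative):
    #   sim.create_box(body_name="marker", half_extents=[0.02, 0.02, 0.02],
    #                  mass=0.0, position=[0.0, 0.0, 0.02],
    #                  rgba_color=RGBCOLORS.RED.value[0] + [1.0], ghost=True)
    # mass=0 keeps the body static; ghost=True skips the collision shape, so
    # the box acts as a purely visual marker.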
def create_cylinder(
self,
body_name: str,
radius: float,
height: float,
mass: float,
position: List,
rgba_color: List,
specular_color: List = [0, 0, 0, 0],
ghost: Optional[bool] = False,
lateral_friction: Optional[float] = None,
spinning_friction: Optional[float] = None,
):
"""Create a cylinder.
Args:
            body_name (str): The name of the cylinder. Must be unique in the sim.
            radius (float): The radius in meters.
            height (float): The height in meters.
            mass (float): The mass in kg.
            position (x, y, z): The position of the cylinder.
            specular_color (r, g, b): RGB specular color.
            rgba_color (r, g, b, a): RGBA color.
            ghost (bool, optional): If True, create without a collision shape. Defaults to False.
"""
visual_kwargs = {
"radius": radius,
"length": height,
"specularColor": specular_color,
"rgbaColor": rgba_color,
}
collision_kwargs = {"radius": radius, "height": height}
self._create_geometry(
body_name,
geom_type=self.bclient.GEOM_CYLINDER,
mass=mass,
position=position,
ghost=ghost,
lateral_friction=lateral_friction,
spinning_friction=spinning_friction,
visual_kwargs=visual_kwargs,
collision_kwargs=collision_kwargs,
)
def get_contact_points(self, body1: str, body2: str, **kwargs) -> Tuple:
""" Returns a tuple of contact point lists of body1 and body2 """
return self.bclient.getContactPoints(self._bodies_idx[body1], self._bodies_idx[body2], **kwargs)
def remove_body(self, body_name):
"""Removes a body from the simulation dictionary"""
if body_name in self._bodies_idx:
self.bclient.removeBody(self._bodies_idx[body_name])
del self._bodies_idx[body_name]
def set_orientation_lines(self, robot_uid, parent_link_index, offset=0.065):
""" Visualize orientation lines for the robots end effector."""
line_color = RGBCOLORS.BLUE.value[0]
self.bclient.addUserDebugLine([-1, 0, offset], [1, 0, offset],
line_color,
parentObjectUniqueId=robot_uid,
parentLinkIndex=parent_link_index)
self.bclient.addUserDebugLine([0, -1, offset], [0, 1, offset],
line_color,
parentObjectUniqueId=robot_uid,
parentLinkIndex=parent_link_index)
self.bclient.addUserDebugLine([0, 0, -1], [0, 0, 1],
line_color,
parentObjectUniqueId=robot_uid,
parentLinkIndex=parent_link_index)
| 22,773 | 41.330855 | 115 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/language_utils.py
|
from typing import List, Set, Tuple
import numpy as np
def create_commands(command_type: str,
property_tuple: Tuple,
action_verbs: List[str] = [],
use_base=True,
use_synonyms=False) -> List[str]:
sentences = []
primary_property, secondary_property = property_tuple
if command_type == 'instruction':
assert len(action_verbs) > 0
beginnings = action_verbs
elif command_type == 'repair':
beginnings = ['no i meant', 'no', 'sorry', 'pardon', 'excuse me', 'actually']
# no i meant the red
# no i meant the red cube
elif command_type == 'negation':
# not the red
# not the red cube
beginnings = ['not']
else:
raise ValueError('Unknown command type')
for _begin in beginnings:
if use_base:
sentences.append(_begin + " the " + primary_property.name.lower() + " " + secondary_property.name.lower())
if use_synonyms:
# combine synonyms with synonyms
for psyn in primary_property.value[1]:
for ssyn in secondary_property.value[1]:
sentences.append(_begin + " the " + psyn.lower() + " " + ssyn.lower())
if use_base and use_synonyms:
# combine property name with synonyms
for psyn in primary_property.value[1]:
sentences.append(_begin + " the " + psyn.lower() + " " + secondary_property.name.lower())
for ssyn in secondary_property.value[1]:
sentences.append(_begin + " the " + primary_property.name.lower() + " " + ssyn.lower())
return list(set(sentences))
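# Worked example: create_commands('negation', (RGBCOLORS.RED, SHAPES.CUBE))
# uses beginnings = ['not'] and use_base=True, so it returns ["not the red cube"];
# with use_synonyms=True it would additionally mix in synonym pairs such as
# "not the scarlet box" (synonyms come from the enums' value[1] lists).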
def parse_instructions(instructions: List[str]) -> Tuple[Set[str], int]:
word_list = []
max_instruction_len = 0
    for _instruction in instructions:
        _splitted = _instruction.lower().split(' ')
if len(_splitted) > max_instruction_len:
max_instruction_len = len(_splitted)
word_list.extend(_splitted)
return set(word_list), max_instruction_len
def word_in_string(instr_string: str, word_lst: np.ndarray):
match_array = np.array([word in instr_string for word in word_lst]).astype(int)
# additional check, because argmax of zero vector yields result of 0
if match_array.sum():
word_idx = np.argmax(match_array)
return word_lst[word_idx]
return ''
class Vocabulary:
def __init__(self, words: List[str]):
word_list = ['<pad>'] + sorted(list(set(words)))
_idx_list = np.arange(0, len(word_list))
self.idx2word = dict(zip(_idx_list, word_list))
self.word2idx = dict(zip(word_list, _idx_list))
assert len(self.idx2word) == len(self.word2idx)
def idx_to_word(self, idx: int) -> str:
return self.idx2word[idx]
def word_to_idx(self, word: str) -> int:
return self.word2idx[word]
def __call__(self, word) -> int:
return self.word_to_idx(word)
def __len__(self) -> int:
return len(self.word2idx)
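# Hedged usage sketch for Vocabulary:
#   vocab = Vocabulary(["lift", "the", "red", "cube"])
#   # word list becomes ['<pad>', 'cube', 'lift', 'red', 'the']
#   vocab("red")          # -> 3
#   vocab.idx_to_word(0)  # -> '<pad>'
#   len(vocab)            # -> 5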
| 3,061 | 35.891566 | 118 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/__init__.py
|
from gymnasium.envs.registration import register
for robot in ['Panda']:
for reward_type in ["sparse", "dense"]:
_r_type = "Dense" if reward_type == "dense" else ""
kwargs = {
"reward_type": reward_type,
}
register(
id=f'{robot}Reach{_r_type}-v0',
entry_point='lanro_gym.environments:{}ReachEnv'.format(robot),
max_episode_steps=50,
kwargs=kwargs,
)
register(
id=f'{robot}Push{_r_type}-v0',
entry_point='lanro_gym.environments:{}PushEnv'.format(robot),
max_episode_steps=50,
kwargs=kwargs,
)
register(
id=f'{robot}Slide{_r_type}-v0',
entry_point='lanro_gym.environments:{}SlideEnv'.format(robot),
max_episode_steps=50,
kwargs=kwargs,
)
register(
id=f'{robot}PickAndPlace{_r_type}-v0',
entry_point='lanro_gym.environments:{}StackEnv'.format(robot),
max_episode_steps=50,
kwargs={
**kwargs,
'num_obj': 1,
'goal_z_range': 0.2,
},
)
for num_obj in [2, 3, 4]:
register(
id=f'{robot}Stack{num_obj}{_r_type}-v0',
entry_point='lanro_gym.environments:{}StackEnv'.format(robot),
max_episode_steps=50 * num_obj,
kwargs={
**kwargs, 'num_obj': num_obj
},
)
for num_obj in [2, 3]:
for _mode in [
'Default', 'Color', 'Shape', 'Weight', 'Size', 'ColorShape', 'WeightShape', 'SizeShape',
'ColorShapeSize', 'ColorShapeSizeWeight'
]:
for _obstype in ['state', 'pixelego', 'pixelstatic']:
_current_obstype = ''
_cam_mode = 'ego'
if _obstype == 'pixelego':
_current_obstype = 'PixelEgo'
_obstype = 'pixel'
elif _obstype == 'pixelstatic':
_cam_mode = 'static'
_current_obstype = 'PixelStatic'
_obstype = 'pixel'
for _h_instr in [True, False]:
for _a_repair in [True, False]:
for _negation_repair in [True, False]:
for _delay_a_repair in [True, False]:
for _use_synonyms in [True, False]:
_current_mode = '' if _mode == 'Default' else _mode
_current_h_instr = 'HI' if _h_instr else ''
_use_syn = 'Synonyms' if _use_synonyms else ''
_current_a_repair = ''
if _a_repair and _negation_repair:
_current_a_repair = 'ARN'
elif _a_repair and not _negation_repair:
_current_a_repair = 'AR'
elif not _a_repair and _negation_repair:
continue
if _a_repair and _delay_a_repair:
_current_a_repair += 'D'
elif not _a_repair and not _delay_a_repair:
continue
# NOTE: Use 100 for action repair, as the
# agent needs to solve the task for possibly 2 goals in one episode
_max_episode_steps = 100 if _a_repair else 50
_kwargs = {
'num_obj': num_obj,
'mode': _mode.lower(),
'obs_type': _obstype,
'use_hindsight_instructions': _h_instr,
'use_action_repair': _a_repair,
'delay_action_repair': _delay_a_repair,
'use_negations_action_repair': _negation_repair,
'camera_mode': _cam_mode,
'use_synonyms': _use_synonyms
}
param_combination = f"{num_obj}{_current_mode}{_current_obstype}{_use_syn}{_current_h_instr}{_current_a_repair}"
register(id=f'{robot}NLReach{param_combination}-v0',
entry_point='lanro_gym.environments:{}NLReachEnv'.format(robot),
max_episode_steps=_max_episode_steps,
kwargs=_kwargs)
register(id=f'{robot}NLPush{param_combination}-v0',
entry_point='lanro_gym.environments:{}NLPushEnv'.format(robot),
max_episode_steps=_max_episode_steps,
kwargs=_kwargs)
register(id=f'{robot}NLGrasp{param_combination}-v0',
entry_point='lanro_gym.environments:{}NLGraspEnv'.format(robot),
max_episode_steps=_max_episode_steps,
kwargs=_kwargs)
register(id=f'{robot}NLLift{param_combination}-v0',
entry_point='lanro_gym.environments:{}NLLiftEnv'.format(robot),
max_episode_steps=_max_episode_steps,
kwargs=_kwargs)
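# Examples of ids produced by the registration scheme above (derived from the
# format strings, not an exhaustive list):
#   num_obj=2, mode='Default', state obs, no extras -> 'PandaNLReach2-v0' (50 steps)
#   num_obj=3, mode='Color', ego-pixel obs, hindsight instructions and delayed
#   action repair with negations -> 'PandaNLPush3ColorPixelEgoHIARND-v0' (100 steps)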
| 5,997 | 50.264957 | 148 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/robots/pybrobot.py
|
from collections import namedtuple
import os
from gymnasium import spaces
from typing import Callable, Dict, List, Optional
import numpy as np
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.env_utils import RGBCOLORS
DEBUG = int("DEBUG" in os.environ and os.environ["DEBUG"])
JointInfo = namedtuple('JointInfo', [
'id', 'name', 'type', 'damping', 'friction', 'lowerLimit', 'upperLimit', 'maxForce', 'maxVelocity', 'controllable'
])
GRIPPER_VEL: int = 4
class PyBulletRobot:
NEUTRAL_JOINT_VALUES: List
NEUTRAL_FINGER_VALUES: List
default_arm_orn_RPY: List
num_DOF: int
action_space = None
ee_link: int
gripper_obs_left_z_offset = 0.0
gripper_obs_right_z_offset = 0.0
left_finger_id = -1
right_finger_id = -1
def __init__(self, sim: PyBulletSimulation, body_name, file_name, base_position, base_orientation, action_type,
full_state, fixed_gripper, finger_friction, camera_mode, **kwargs):
"""
:param sim: Simulation class
:param fixed_gripper: The boolean variable to lock the gripper
:param base_position: The [x, y, z] base coordinates for the end-effector
:param fingers_friction: The amount of finger friction of the gripper
:param full state: If the full state should be returned
:param action_type: How actions are calculated
One of ['absolute_quat', 'relative_quat', 'relative_joints',
'absolute_joints', 'absolute_rpy', 'relative_rpy', 'end_effector']
"""
self.sim = sim
self.body_name = body_name
self.action_type = action_type
self.full_state = full_state
self.fixed_gripper = fixed_gripper
self.max_joint_change = sim.dt
# gripper change is four times faster than joint changes. This in
# combination with the force increase was necessary to achieve a
# good success rate for pick and place.
self.max_gripper_change = sim.dt * GRIPPER_VEL
self.camera_mode = camera_mode
self.action_functions: Dict[str, Callable[[np.ndarray, np.ndarray], Optional[np.ndarray]]] = {
'absolute_quat': self.absolute_quat_step,
'relative_quat': self.relative_quat_step,
'relative_joints': self.relative_joint_step,
'absolute_joints': self.absolute_joint_step,
'absolute_rpy': self.absolute_rpy_step,
'relative_rpy': self.relative_rpy_step,
'end_effector': self.end_effector_step,
}
with self.sim.no_rendering():
self._load_robot(file_name, base_position, base_orientation, **kwargs)
self._parse_joint_info()
self.setup(finger_friction)
def _load_robot(self, file_name, base_position, base_orientation, **kwargs):
if 'urdf' in file_name:
self._uid = self.sim.loadURDF(body_name=self.body_name,
fileName=file_name,
basePosition=base_position,
baseOrientation=base_orientation,
useFixedBase=True,
**kwargs)
elif 'sdf' in file_name:
self._uid = self.sim.loadSDF(body_name=self.body_name, sdfFileName=file_name)
self.sim.set_base_pose(self.body_name, base_position, base_orientation)
def _parse_joint_info(self):
num_joints = self.sim.get_num_joints(self.body_name)
self.joints = []
self.controllable_joints = []
for i in range(num_joints):
info = self.sim.get_joint_info(self.body_name, i)
jointID = info[0]
jointName = info[1].decode("utf-8")
jointType = info[2] # JOINT_REVOLUTE, JOINT_PRISMATIC, JOINT_SPHERICAL, JOINT_PLANAR, JOINT_FIXED
jointDamping = info[6]
jointFriction = info[7]
jointLowerLimit = info[8]
jointUpperLimit = info[9]
jointMaxForce = info[10]
jointMaxVelocity = info[11]
controllable = (jointType != self.sim.bclient.JOINT_FIXED)
if controllable:
self.controllable_joints.append(jointID)
info = JointInfo(jointID, jointName, jointType, jointDamping, jointFriction, jointLowerLimit,
jointUpperLimit, jointMaxForce, jointMaxVelocity, controllable)
self.joints.append(info)
self.arm_joints = self.controllable_joints[:self.num_DOF]
self.ee_joints = self.controllable_joints[self.num_DOF:]
self.arm_lower_limits = [info.lowerLimit for info in self.joints if info.controllable][:self.num_DOF]
self.arm_upper_limits = [info.upperLimit for info in self.joints if info.controllable][:self.num_DOF]
self.arm_joint_ranges = [info.upperLimit - info.lowerLimit for info in self.joints
if info.controllable][:self.num_DOF]
self.ee_lower_limits = [info.lowerLimit for info in self.joints if info.controllable][self.num_DOF:]
self.ee_upper_limits = [info.upperLimit for info in self.joints if info.controllable][self.num_DOF:]
self.ee_joint_ranges = [info.upperLimit - info.lowerLimit for info in self.joints
if info.controllable][self.num_DOF:]
self.arm_max_force = [self.joints[arm_id].maxForce for arm_id in self.arm_joints]
self.ee_max_force = [self.joints[ee_id].maxForce for ee_id in self.ee_joints]
def get_ee_position(self) -> np.ndarray:
"""Returns the position of the end-effector as (x, y, z)"""
return self.get_link_position(self.ee_link)
def get_ee_velocity(self) -> np.ndarray:
"""Returns the velocity of the end-effector as (vx, vy, vz)"""
return self.get_link_velocity(self.ee_link)
def reset(self) -> None:
self.sim.set_joint_angles(self.body_name,
joints=self.arm_joints + self.ee_joints,
angles=self.NEUTRAL_JOINT_VALUES + self.NEUTRAL_FINGER_VALUES)
def setup(self, finger_friction):
"""Setup robot's action space and finger friction"""
# XYZ relative end-effector change in position
if self.action_type == 'end_effector':
action_high = np.array([1] * 3)
action_low = -action_high.copy()
# relative joint change
elif self.action_type == 'relative_joints':
action_high = np.array([1] * self.num_DOF)
action_low = -action_high.copy()
# absolute joint values and
elif self.action_type == 'absolute_joints':
action_high = np.array(self.arm_upper_limits)
action_low = np.array(self.arm_lower_limits)
# absolute rpy values
elif self.action_type == 'absolute_rpy':
action_high = np.array(self.arm_upper_limits[:6])
action_low = np.array(self.arm_lower_limits[:6])
# relative joint and rpy change
elif self.action_type == 'relative_rpy':
action_high = np.array([1] * 6)
action_low = -action_high.copy()
# relative quaternion change
elif self.action_type == 'relative_quat':
action_high = np.array([1] * 7)
action_low = -action_high.copy()
# absolute quaternion
elif self.action_type == 'absolute_quat':
action_high = np.array([1] * 7)
action_low = -action_high.copy()
else:
raise ValueError("Unknown action type")
# add gripper to action space
if not self.fixed_gripper:
action_high = np.concatenate((action_high, [1.]))
action_low = np.concatenate((action_low, [-1.]))
self.action_space = spaces.Box(low=action_low, high=action_high, dtype='float32')
# set lateral and spinning friction for fingers
self.sim.set_lateral_friction(self.body_name, self.ee_joints[0], lateral_friction=finger_friction)
self.sim.set_lateral_friction(self.body_name, self.ee_joints[1], lateral_friction=finger_friction)
self.sim.set_spinning_friction(self.body_name, self.ee_joints[0], spinning_friction=0.05)
self.sim.set_spinning_friction(self.body_name, self.ee_joints[1], spinning_friction=0.05)
def set_action(self, action) -> None:
''' Takes in the action and uses the appropriate function to determine the joint angles
for execution in the environment '''
raw_action = np.copy(action)
if self.fixed_gripper:
gripper = None
else:
action = raw_action[:-1]
gripper = raw_action[-1]
self.action_functions[self.action_type](action, gripper)
def absolute_quat_step(self, action, gripper) -> None:
"""apply absolute quaternions to the joints"""
assert len(action) == 7 # (x,y,z) (qx, qy, qz, qw)
new_pos = action[0:3]
new_orn = action[3:7] # as quaternions
self.goto(pos=new_pos, orn=new_orn, gripper=gripper)
def relative_quat_step(self, action, gripper) -> None:
"""apply relative quaternions to the joints"""
assert len(action) == 7 # (Δx,Δy,Δz) (Δqx, Δqy, Δqz, Δqw)
state = self.sim.get_link_state(self.body_name, self.ee_link)
current_pos, current_orn = state[0], state[1]
new_pos = action[0:3] * self.max_joint_change + current_pos
new_orn = action[3:7] * self.max_joint_change + current_orn # as quaternions
self.goto(new_pos, new_orn, gripper)
def absolute_rpy_step(self, action, gripper) -> None:
"""apply absolute roll, pitch, and yaw to the joints"""
assert len(action) == 6
new_pos = action[0:3]
new_orn = action[3:6]
self.goto(new_pos, self.sim.get_quaternion_from_euler(new_orn), gripper)
def relative_rpy_step(self, action, gripper) -> None:
"""apply relative action to roll, pitch, yaw, and the joints"""
assert len(action) == 6 # (Δx,Δy,Δz) (Δr, Δp, Δy)
state = self.sim.get_link_state(self.body_name, self.ee_link)
current_pos, current_orn = state[0], state[1]
current_orn = self.sim.get_euler_from_quaternion(current_orn)
new_pos = action[0:3] * self.max_joint_change + current_pos
new_orn = action[3:6] * self.max_joint_change + current_orn
self.goto(new_pos, self.sim.get_quaternion_from_euler(new_orn), gripper)
def relative_joint_step(self, action, gripper) -> None:
"""apply relative values to the joints"""
assert len(action) == self.num_DOF # Δx_i
current_poses = self.get_current_pos()
jointPoses = action * self.max_joint_change + current_poses
self.goto_joint_poses(jointPoses, gripper)
def absolute_joint_step(self, action, gripper) -> None:
"""apply absolute values to the joints"""
self.goto_joint_poses(action, gripper)
def end_effector_step(self, action, gripper):
assert len(action) == 3
ee_ctrl = action * self.max_joint_change
ee_position = self.get_ee_position()
ee_target_position = ee_position + ee_ctrl
self.goto(ee_target_position, self.default_arm_orn_RPY, gripper)
def goto(self, pos=None, orn=None, gripper=None) -> None:
''' Uses PyBullet IK to solve for desired joint angles '''
joint_poses = self.sim.bclient.calculateInverseKinematics(
bodyUniqueId=self._uid,
endEffectorLinkIndex=self.ee_link,
targetPosition=pos,
targetOrientation=orn,
# IK requires all 4 lists (lowerLimits, upperLimits, jointRanges, restPoses).
# Otherwise regular IK will be used.
lowerLimits=self.arm_lower_limits + self.ee_lower_limits,
upperLimits=self.arm_upper_limits + self.ee_upper_limits,
jointRanges=self.arm_joint_ranges + self.ee_joint_ranges,
restPoses=self.NEUTRAL_JOINT_VALUES + self.NEUTRAL_FINGER_VALUES,
maxNumIterations=100,
residualThreshold=1e-5)
joint_poses = list(joint_poses[0:self.num_DOF])
self.goto_joint_poses(joint_poses, gripper)
def goto_joint_poses(self, joint_target_angles: List, gripper: float) -> None:
if gripper is not None:
# call robot-specific gripper function
finger_target_angles = self.gripper_control(gripper)
else:
finger_target_angles = self.gripper_control(None)
self.control_joints(np.concatenate([joint_target_angles, finger_target_angles]))
def gripper_control(self, amount) -> List:
raise NotImplementedError
def get_camera_img(self):
raise NotImplementedError
def get_link_position(self, link: int) -> np.ndarray:
"""Returns the position of a link as (x, y, z)"""
return np.array(self.sim.get_link_position(self.body_name, link))
def get_link_velocity(self, link: int) -> np.ndarray:
"""Returns the velocity of a link as (vx, vy, vz)"""
return np.array(self.sim.get_link_velocity(self.body_name, link))
def get_current_pos(self) -> np.ndarray:
return np.array([self.sim.get_joint_angle(self.body_name, j) for j in self.arm_joints])
def control_joints(self, target_angles: List) -> None:
self.sim.bclient.setJointMotorControlArray(
bodyUniqueId=self._uid,
jointIndices=self.arm_joints + self.ee_joints,
controlMode=self.sim.bclient.POSITION_CONTROL,
targetPositions=target_angles,
forces=self.arm_max_force + self.ee_max_force,
)
def get_fingers_width(self) -> float:
"""Returns the distance between the fingers."""
finger1 = self.sim.get_joint_angle(self.body_name, self.left_finger_id)
finger2 = self.sim.get_joint_angle(self.body_name, self.right_finger_id)
return finger1 + finger2
def gripper_ray_obs(self):
"""
This method performs a single raycast to determine which object is between
the robot's grippers with a specific z-offset accounting for detection
between the fingertips.
"""
leftg = self.get_link_position(self.left_finger_id)
rightg = self.get_link_position(self.right_finger_id)
leftg[-1] -= self.gripper_obs_left_z_offset
rightg[-1] -= self.gripper_obs_right_z_offset
leftg = tuple(leftg)
rightg = tuple(rightg)
hit_obj_id, link_idx, hit_fraction, hit_pos, hit_normal = self.sim.bclient.rayTest(leftg, rightg)[0]
if DEBUG:
line_color = RGBCOLORS.PINK.value[0]
self.sim.bclient.addUserDebugLine(leftg, rightg, line_color, 0.5, 1, replaceItemUniqueId=0)
return hit_obj_id, link_idx, hit_fraction, hit_pos, hit_normal
def get_obs(self):
if self.fixed_gripper:
gripper_state = np.concatenate((self.get_ee_position(), self.get_ee_velocity()))
else:
gripper_state = np.concatenate((self.get_ee_position(), self.get_ee_velocity(), [self.get_fingers_width()]))
if self.full_state:
state = self.sim.get_link_state(self.body_name, self.ee_link)
orn, orn_vel = state[1], state[-1]
current_poses = self.get_current_pos()
return np.concatenate(
(gripper_state, self.sim.get_euler_from_quaternion(orn), orn_vel, current_poses)).copy()
else:
return gripper_state.copy()
def get_default_controls(self):
if self.action_type == 'absolute_joints':
default_values = {
_key: _val
for _key, _val in zip([str(_idx)
for _idx in range(len(self.NEUTRAL_JOINT_VALUES))], self.NEUTRAL_JOINT_VALUES)
}
elif self.action_type == 'relative_joints':
default_values = {
_key: _val
for _key, _val in zip([str(_idx) for _idx in range(len(self.NEUTRAL_JOINT_VALUES))], [0] *
len(self.NEUTRAL_JOINT_VALUES))
}
else:
default_values = {"X": 0.0, "Y": 0.0, "Z": 0.0, "1": 0.0, "2": 0.0, "3": 0.0, "4": 0.0}
return default_values
def get_xyz_rpy_controls(self):
default_values = self.get_default_controls()
controls = []
as_low = self.action_space.low
as_high = self.action_space.high
if self.action_type in ['relative_joints', 'absolute_joints']:
for _idx, _dv in enumerate(list(default_values.values())):
controls.append(self.sim.bclient.addUserDebugParameter(str(_idx), as_low[_idx], as_high[_idx], _dv))
else:
## if action_type == 'end_effector'
controls.append(self.sim.bclient.addUserDebugParameter("X", as_low[0], as_high[0], default_values['X']))
controls.append(self.sim.bclient.addUserDebugParameter("Y", as_low[1], as_high[1], default_values['Y']))
controls.append(self.sim.bclient.addUserDebugParameter("Z", as_low[2], as_high[2], default_values['Z']))
if self.action_type in ['relative_rpy', 'absolute_rpy']:
# RPY
controls.append(self.sim.bclient.addUserDebugParameter("Rx", as_low[3], as_high[3],
default_values['1']))
controls.append(self.sim.bclient.addUserDebugParameter("Px", as_low[4], as_high[4],
default_values['2']))
controls.append(self.sim.bclient.addUserDebugParameter("Yx", as_low[5], as_high[5],
default_values['3']))
elif self.action_type in ['relative_quat', 'absolute_quat']:
# quaternions
controls.append(self.sim.bclient.addUserDebugParameter("Qx", as_low[3], as_high[3],
default_values['1']))
controls.append(self.sim.bclient.addUserDebugParameter("Qy", as_low[4], as_high[4],
default_values['2']))
controls.append(self.sim.bclient.addUserDebugParameter("Qz", as_low[5], as_high[5],
default_values['3']))
controls.append(self.sim.bclient.addUserDebugParameter("Qw", as_low[6], as_high[6],
default_values['4']))
if not self.fixed_gripper:
controls.append(self.sim.bclient.addUserDebugParameter("grip", as_low[-1], as_high[-1], 0))
return controls
| 18,934 | 48.309896 | 120 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/robots/__init__.py
|
from .panda import Panda
from .pybrobot import PyBulletRobot
| 61 | 19.666667 | 35 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/robots/panda.py
|
from typing import List
from lanro_gym.simulation import PyBulletSimulation
import numpy as np
from lanro_gym.robots.pybrobot import PyBulletRobot
from lanro_gym.utils import gripper_camera
class Panda(PyBulletRobot):
NEUTRAL_JOINT_VALUES: List = [0.00, 0.41, 0.00, -1.85, -0.00, 2.26, 0.79]
NEUTRAL_FINGER_VALUES: List = [0, 0]
ee_link: int = 11
num_DOF: int = 7
gripper_obs_left_z_offset = 0.026
gripper_obs_right_z_offset = 0.026
left_finger_id = 9
right_finger_id = 10
def __init__(self,
sim: PyBulletSimulation,
fixed_gripper: bool = False,
base_position: List[float] = [-0.6, 0, 0],
full_state: bool = True,
action_type: str = 'relative_joints',
finger_friction: float = 1.0,
camera_mode: str = 'ego'):
super().__init__(sim,
body_name="panda",
file_name="franka_panda/panda.urdf",
base_position=base_position,
base_orientation=sim.get_quaternion_from_euler([0, 0, 0]),
action_type=action_type,
fixed_gripper=fixed_gripper,
full_state=full_state,
finger_friction=finger_friction,
camera_mode=camera_mode)
self.default_arm_orn_RPY = sim.get_quaternion_from_euler([2 * np.pi, np.pi, np.pi])
self.sim.set_orientation_lines(self._uid, 8)
# create a constraint to keep the fingers aligned
_c = self.sim.bclient.createConstraint(self._uid,
self.ee_joints[0],
self._uid,
self.ee_joints[1],
jointType=self.sim.bclient.JOINT_GEAR,
jointAxis=[1, 0, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0])
self.sim.bclient.changeConstraint(_c, gearRatio=-1, erp=0.1, maxForce=50)
# increase forces of some joints and the end effector
self.arm_max_force[5] *= 5
self.arm_max_force[6] *= 5
self.ee_max_force = [85, 85]
def gripper_control(self, amount: float) -> List:
        if amount is None:
return self.NEUTRAL_FINGER_VALUES
fingers_ctrl = amount * self.max_gripper_change
fingers_width = self.get_fingers_width()
target_finger_width = fingers_width + fingers_ctrl
target_angles = [target_finger_width / 2, target_finger_width / 2]
return target_angles
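    # Worked example (assuming the default simulation timing, dt = 1/500 * 20 = 0.04 s):
    # max_gripper_change = dt * GRIPPER_VEL = 0.04 * 4 = 0.16, so amount=1.0 at a
    # current width of 0.04 targets a width of 0.20, i.e. [0.10, 0.10] per finger.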
def get_camera_img(self):
ee_position = np.array(self.get_ee_position())
projectionMatrix = self.sim.bclient.computeProjectionMatrixFOV(fov=60, aspect=1, nearVal=0.1, farVal=100.0)
return gripper_camera(self.sim.bclient, projectionMatrix, ee_position, [0, 0, 0, 0], mode=self.camera_mode)
| 3,127 | 45.686567 | 115 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/env_utils/task_object_list.py
|
from enum import Enum
from typing import Dict, List
from lanro_gym.utils import get_prop_combinations
from lanro_gym.env_utils import RGBCOLORS, SHAPES, WEIGHTS, SIZES, TaskObject
class TaskObjectList:
def __init__(self,
sim,
color_mode: bool = False,
shape_mode: bool = False,
weight_mode: bool = False,
size_mode: bool = False):
self.sim = sim
# default colors
concept_list: List[Enum] = [RGBCOLORS.RED, RGBCOLORS.GREEN, RGBCOLORS.BLUE]
if color_mode:
# extend range of colors
concept_list.extend([
RGBCOLORS.YELLOW,
RGBCOLORS.PURPLE,
RGBCOLORS.ORANGE,
RGBCOLORS.PINK,
RGBCOLORS.CYAN,
RGBCOLORS.BROWN,
])
# shape mode combinations
if shape_mode:
concept_list.extend([SHAPES.CUBE, SHAPES.CUBOID, SHAPES.CYLINDER])
if weight_mode:
concept_list.extend([WEIGHTS.HEAVY, WEIGHTS.LIGHT])
if size_mode:
concept_list.extend([SIZES.SMALL, SIZES.MEDIUM, SIZES.BIG])
self.objects = self.setup(concept_list)
def setup(self, concept_list) -> List[TaskObject]:
objects = []
# add single property to task
for concept in concept_list:
_args = self.get_task_obj_args({}, concept)
objects.append(TaskObject(self.sim, **_args))
concept_tuple_list = get_prop_combinations(concept_list)
if len(concept_tuple_list):
for concept_tuple in concept_tuple_list:
prop1 = concept_tuple[0]
prop2 = concept_tuple[1]
_args = self.get_task_obj_args({}, prop1)
_args = self.get_task_obj_args(_args, prop2, primary=False)
objects.append(TaskObject(self.sim, **_args))
return objects
@staticmethod
def get_task_obj_args(_args, prop, primary=True) -> Dict:
if isinstance(prop, SIZES):
enum = SIZES
elif isinstance(prop, SHAPES):
enum = SHAPES
elif isinstance(prop, WEIGHTS):
enum = WEIGHTS
        elif isinstance(prop, RGBCOLORS):
            enum = RGBCOLORS
        else:
            # guard against `enum` being unbound for unexpected property types
            raise ValueError(f"Unsupported property type: {type(prop)}")
prop_list = [sz for sz in enum]
prop_idx = prop_list.index(prop)
if primary:
_args['primary'] = prop
_args['onehot_idx'] = prop_idx
else:
_args['secondary'] = prop
_args['sec_onehot_idx'] = prop_idx
return _args
def get_obj_properties(self, objects=None) -> List:
if objects is None:
objects = self.objects
return [obj.get_properties() for obj in objects]
def __getitem__(self, index) -> TaskObject:
return self.objects[index]
def __len__(self) -> int:
return len(self.objects)
| 2,923 | 33 | 83 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/env_utils/object_properties.py
|
from enum import Enum
from lanro_gym.utils import scale_rgb
class DUMMY(Enum):
OBJECT = 0, ['object']
class RGBCOLORS(Enum):
""" RGBColors enum class with all colors defined as array of floats [0, 1]"""
BLACK = scale_rgb([0, 0, 0]), ["ebony"]
BLUE = scale_rgb([78.0, 121.0, 167.0]), ["azure"]
BROWN = scale_rgb([156.0, 117.0, 95.0]), ["chocolate"]
CYAN = scale_rgb([118.0, 183.0, 178.0]), ["teal"]
GRAY = scale_rgb([186.0, 176.0, 172.0]), ["ashen"]
GREEN = scale_rgb([89.0, 169.0, 79.0]), ["lime"]
PINK = scale_rgb([255.0, 157.0, 167.0]), ["coral"]
ORANGE = scale_rgb([242.0, 142.0, 43.0]), ["apricot"]
PURPLE = scale_rgb([176.0, 122.0, 161.0]), ["lilac"]
RED = scale_rgb([255.0, 87.0, 89.0]), ["scarlet"]
WHITE = scale_rgb([255, 255, 255]), ["colorless"]
YELLOW = scale_rgb([237.0, 201.0, 72.0]), ["amber"]
class SHAPES(Enum):
""" SHAPES enum class with all shapes with the corresponding object file id and words"""
CUBE = 0, ["box", "block"],
CUBOID = 1, ["brick", "oblong"],
CYLINDER = 2, ["barrel", "tophat"],
class WEIGHTS(Enum):
LIGHT = 1, ['lightweight', 'lite']
HEAVY = 4, ['heavyweight', 'massy']
class SIZES(Enum):
SMALL = 0.03, ['little', 'tiny']
MEDIUM = 0.04, ['midsize', 'moderate-size']
BIG = 0.05, ['large', 'tall']
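# Usage sketch (editorial addition, not part of the original module): each
# enum value is a (payload, synonyms) tuple, so value[0] carries the scaled
# RGB list, shape file id, mass factor, or size, and value[1] the word list.
if __name__ == "__main__":
    print(RGBCOLORS.RED.value[0])    # scaled RGB floats in [0, 1]
    print(SHAPES.CUBE.value[1])      # synonym words: ['box', 'block']
    print(SIZES.SMALL.value[0])      # numeric size: 0.03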
| 1,334 | 31.560976 | 92 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/env_utils/__init__.py
|
from .object_properties import RGBCOLORS, SHAPES, SIZES, DUMMY, WEIGHTS
from .task_object import TaskObject
from .task_object_list import TaskObjectList
import numpy as np
def distinguishable_by_primary(goal_obj: TaskObject, non_goal_obj: TaskObject):
if isinstance(goal_obj.primary, RGBCOLORS):
return goal_obj.color != non_goal_obj.color
elif isinstance(goal_obj.primary, SHAPES):
return goal_obj.shape != non_goal_obj.shape
elif isinstance(goal_obj.primary, SIZES):
return goal_obj._size != non_goal_obj._size
elif isinstance(goal_obj.primary, WEIGHTS):
return goal_obj.weight != non_goal_obj.weight
else:
return False
def distinguishable_by_primary_or_secondary(goal_obj: TaskObject, non_goal_obj: TaskObject):
primary_diff = distinguishable_by_primary(goal_obj, non_goal_obj)
secondary_diff = False
if isinstance(goal_obj.secondary, RGBCOLORS):
secondary_diff = goal_obj.color != non_goal_obj.color
elif isinstance(goal_obj.secondary, SHAPES):
secondary_diff = goal_obj.shape != non_goal_obj.shape
elif isinstance(goal_obj.secondary, SIZES):
secondary_diff = goal_obj._size != non_goal_obj._size
elif isinstance(goal_obj.secondary, WEIGHTS):
secondary_diff = goal_obj.weight != non_goal_obj.weight
return np.sum([primary_diff, secondary_diff]) > 0
def dummys_not_goal_props(goal_obj: TaskObject, non_goal_obj: TaskObject):
dummy_props = []
if non_goal_obj.has_dummy_color:
dummy_props.append(non_goal_obj.get_color())
if non_goal_obj.has_dummy_shape:
dummy_props.append(non_goal_obj.get_shape())
if non_goal_obj.has_dummy_size:
dummy_props.append(non_goal_obj.get_size())
if non_goal_obj.has_dummy_weight:
dummy_props.append(non_goal_obj.get_weight())
primary_dummy_same = goal_obj.primary in dummy_props
secondary_dummy_same = goal_obj.secondary in dummy_props
one_overlap = np.sum([primary_dummy_same, secondary_dummy_same]) < 2
return one_overlap and distinguishable_by_primary_or_secondary(goal_obj, non_goal_obj)
def valid_task_object_combination(goal_obj: TaskObject, non_goal_obj: TaskObject):
goal_primary = goal_obj.primary
goal_secondary = goal_obj.secondary
non_goal_primary = non_goal_obj.primary
non_goal_secondary = non_goal_obj.secondary
different_primary = (goal_primary != non_goal_primary)
different_primary_secondary = (goal_primary != non_goal_secondary)
if isinstance(goal_secondary, DUMMY):
if isinstance(non_goal_secondary, DUMMY):
primary_dummy_different = distinguishable_by_primary(goal_obj, non_goal_obj)
return different_primary and primary_dummy_different
else:
return different_primary_secondary and different_primary
elif isinstance(non_goal_secondary, DUMMY):
return dummys_not_goal_props(goal_obj, non_goal_obj)
else:
different_secondary = (goal_secondary != non_goal_secondary)
return distinguishable_by_primary_or_secondary(goal_obj, non_goal_obj) and (different_secondary
or different_primary_secondary)
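# Worked example (editorial, writing objects as (primary, secondary)): goal =
# (RED, CUBE) vs non-goal = (GREEN, CUBE) is valid, since the primaries differ
# and a shared secondary shape is allowed; goal = (RED, DUMMY) vs non-goal =
# (RED, DUMMY) is rejected, because the pair is not distinguishable by primary.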
| 3,240 | 44.013889 | 115 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/env_utils/task_object.py
|
from __future__ import annotations
from typing import Tuple, Union, Any
import numpy as np
from lanro_gym.env_utils.object_properties import WEIGHTS
from lanro_gym.utils import get_one_hot_list
from lanro_gym.env_utils import RGBCOLORS, SHAPES, SIZES, DUMMY
def get_default_enum_index(default_enum_cls, default_enum):
enum_list = [e for e in default_enum_cls]
enum_idx = enum_list.index(default_enum)
return default_enum, enum_idx
class TaskObject:
def __init__(self, sim, primary=None, secondary=None, onehot_idx=None, sec_onehot_idx=None, obj_mass: int = 2):
self.sim = sim
self.primary = primary
self.secondary = DUMMY.OBJECT
self.obj_mass = obj_mass
self.onehot_colors = get_one_hot_list(len(RGBCOLORS))
self.onehot_shapes = get_one_hot_list(len(SHAPES))
self.onehot_sizes = get_one_hot_list(len(SIZES))
self.onehot_weights = get_one_hot_list(len(WEIGHTS))
self.onehot_idx_colors = None
self.onehot_idx_shapes = None
self.onehot_idx_sizes = None
self.onehot_idx_weights = None
self.has_dummy_size = False
self.has_dummy_color = False
self.has_dummy_shape = False
self.has_dummy_weight = False
if type(primary) == type(secondary):
raise ValueError("primary and secondary must be different object types")
if isinstance(primary, SIZES):
self.onehot_idx_sizes = onehot_idx
self._size = primary
self.object_size = primary.value[0]
elif isinstance(secondary, SIZES):
self.secondary = secondary
self._size = secondary
self.object_size = secondary.value[0]
self.onehot_idx_sizes = sec_onehot_idx
else:
self.has_dummy_size = True
dummy_enum, self.onehot_idx_sizes = get_default_enum_index(SIZES, SIZES.MEDIUM)
self._size = dummy_enum
self.object_size = dummy_enum.value[0]
if isinstance(primary, RGBCOLORS):
self.onehot_idx_colors = onehot_idx
self.color = primary
elif isinstance(secondary, RGBCOLORS):
self.secondary = secondary
self.color = secondary
self.onehot_idx_colors = sec_onehot_idx
else:
self.has_dummy_color = True
self.color, self.onehot_idx_colors = get_default_enum_index(RGBCOLORS, RGBCOLORS.RED)
if isinstance(primary, SHAPES):
self.onehot_idx_shapes = onehot_idx
self.shape = primary
elif isinstance(secondary, SHAPES):
self.secondary = secondary
self.shape = secondary
self.onehot_idx_shapes = sec_onehot_idx
else:
self.has_dummy_shape = True
_dummy_enum, self.onehot_idx_shapes = get_default_enum_index(SHAPES, SHAPES.CUBE)
self.shape = _dummy_enum
if isinstance(primary, WEIGHTS):
self.onehot_idx_weights = onehot_idx
self.weight = primary
self.obj_mass = self.obj_mass * primary.value[0]
elif isinstance(secondary, WEIGHTS):
self.secondary = secondary
self.weight = secondary
self.onehot_idx_weights = sec_onehot_idx
self.obj_mass = self.obj_mass * secondary.value[0]
else:
self.has_dummy_weight = True
_dummy_enum, self.onehot_idx_weights = get_default_enum_index(WEIGHTS, WEIGHTS.LIGHT)
self.weight = _dummy_enum
self.obj_mass = self.obj_mass * _dummy_enum.value[0]
def __hash__(self):
return hash(self.primary) ^ hash(self.secondary)
def load(self, object_body_key):
if self.shape == SHAPES.CUBE:
self.sim.create_box(
body_name=object_body_key,
half_extents=[
self.object_size / 2,
self.object_size / 2,
self.object_size / 2,
],
mass=self.obj_mass,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=self.color.value[0] + [1],
)
elif self.shape == SHAPES.CUBOID:
self.sim.create_box(
body_name=object_body_key,
half_extents=[
self.object_size / 2 * 2,
self.object_size / 2 * 0.75,
self.object_size / 2 * 0.75,
],
mass=self.obj_mass,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=self.color.value[0] + [1],
)
elif self.shape == SHAPES.CYLINDER:
self.sim.create_cylinder(
body_name=object_body_key,
radius=self.object_size * 0.5,
height=self.object_size * 0.75,
mass=self.obj_mass * 3,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=self.color.value[0] + [1],
lateral_friction=1.0,
spinning_friction=0.005,
)
def get_properties(self) -> Tuple:
return self.primary, self.secondary
def get_color(self) -> RGBCOLORS:
return self.color
def get_shape(self) -> Union[SHAPES, Any]:
return self.shape
def get_size(self) -> Union[SIZES, Any]:
return self._size
def get_weight(self) -> Union[WEIGHTS, Any]:
return self.weight
def __eq__(self, __o: TaskObject) -> bool:
"""Two task objects are equal if they have at least three properties in common."""
same_color = self.color == __o.color
same_shape = self.shape == __o.shape
same_size = self._size == __o._size
same_weight = self.weight == __o.weight
return np.sum([same_color, same_shape, same_size, same_weight]) > 2
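    # Example (editorial): two objects sharing shape, size, and weight are
    # treated as equal even if their colors differ, since 3 of the 4
    # properties already match.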
def get_onehot(self):
size_onehot = self.onehot_sizes[self.onehot_idx_sizes]
color_onehot = self.onehot_colors[self.onehot_idx_colors]
shape_onehot = self.onehot_shapes[self.onehot_idx_shapes]
weight_onehot = self.onehot_weights[self.onehot_idx_weights]
return np.concatenate([size_onehot, color_onehot, shape_onehot, weight_onehot])
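    # The concatenated identifier has a fixed layout of 3 size, 12 color,
    # 3 shape, and 2 weight slots, i.e. a length-20 one-hot block per object.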
def get_color_strs(self):
return [list(RGBCOLORS)[self.onehot_idx_colors].name.lower()] + list(RGBCOLORS)[self.onehot_idx_colors].value[1]
def get_shape_strs(self):
return [list(SHAPES)[self.onehot_idx_shapes].name.lower()] + list(SHAPES)[self.onehot_idx_shapes].value[1]
def get_size_strs(self):
return [list(SIZES)[self.onehot_idx_sizes].name.lower()] + list(SIZES)[self.onehot_idx_sizes].value[1]
def get_weight_strs(self):
return [list(WEIGHTS)[self.onehot_idx_weights].name.lower()] + list(WEIGHTS)[self.onehot_idx_weights].value[1]
| 6,856 | 37.960227 | 120 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/env/base.py
|
import os
from typing import Any, Dict, Optional, Tuple, Union
import gymnasium as gym
from gymnasium.utils import seeding
import numpy as np
from lanro_gym.robots import PyBulletRobot
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.core import LanguageTask, Task
gym.logger.set_level(40)
DEBUG = int("DEBUG" in os.environ and os.environ["DEBUG"])
class BaseEnv(gym.Env):
"""
    BaseEnv is a goal-conditioned Gym environment that inherits from `gym.Env`.
"""
obs_low: float = -240.0
obs_high: float = 240.0
def __init__(self,
sim: PyBulletSimulation,
robot: PyBulletRobot,
task: Union[Task, LanguageTask],
obs_type: str = "state"):
self.sim = sim
self.metadata = {"render_modes": ["human", "rgb_array"], 'video.frames_per_second': int(np.round(1 / sim.dt))}
self.reward_range = (-1.0, 0.0)
self.robot = robot
self.action_space = self.robot.action_space
self.task = task
self.obs_type = obs_type
def close(self) -> None:
self.sim.close()
def _get_obs(self):
raise NotImplementedError
def getKeyboardEvents(self) -> Dict[int, int]:
return self.sim.bclient.getKeyboardEvents()
def reset(self,
seed: Optional[int] = None,
options: Optional[dict] = None) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:
super().reset(seed=seed, options=options)
self.task.np_random, seed = seeding.np_random(seed)
with self.sim.no_rendering():
self.robot.reset()
self.task.reset()
info = {"is_success": False}
return self._get_obs(), info
def compute_reward(self, achieved_goal, desired_goal, info):
return self.task.compute_reward(achieved_goal, desired_goal, info)
def render(self, mode="human"):
return self.sim.render(mode)
| 1,945 | 30.387097 | 118 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/env/goal.py
|
import os
from typing import Dict, List, Tuple, Optional, Any
import gymnasium as gym
from gymnasium import spaces
import numpy as np
from lanro_gym.robots import PyBulletRobot
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.core import Task
from lanro_gym.env import BaseEnv
gym.logger.set_level(40)
DEBUG = int("DEBUG" in os.environ and os.environ["DEBUG"])
class GoalEnv(BaseEnv):
ep_end_goal_distance: List = []
def __init__(self, sim: PyBulletSimulation, robot: PyBulletRobot, task: Task, obs_type: str = "state"):
BaseEnv.__init__(self, sim, robot, task, obs_type)
obs, _ = self.reset()
self.observation_space = spaces.Dict(
dict(
observation=spaces.Box(low=self.obs_low,
high=self.obs_high,
shape=obs["observation"].shape,
dtype=np.float32),
desired_goal=spaces.Box(low=self.obs_low,
high=self.obs_high,
shape=obs["desired_goal"].shape,
dtype=np.float32),
achieved_goal=spaces.Box(low=self.obs_low,
high=self.obs_high,
shape=obs["achieved_goal"].shape,
dtype=np.float32),
))
def _get_obs(self) -> Dict[str, np.ndarray]:
robot_obs = self.robot.get_obs()
task_obs = self.task.get_obs()
observation = np.concatenate([robot_obs, task_obs])
achieved_goal = self.task.get_achieved_goal()
desired_goal = self.task.get_goal()
return {
"observation": observation.copy(),
"achieved_goal": achieved_goal.copy(),
"desired_goal": desired_goal.copy(),
}
    def step(self, action) -> Tuple[Dict[str, np.ndarray], float, bool, bool, Dict]:
action = np.clip(action, self.action_space.low, self.action_space.high)
self.robot.set_action(action)
self.sim.step()
obs = self._get_obs()
truncated = False
info = {
"is_success": self.task.is_success(obs["achieved_goal"], obs["desired_goal"]),
}
terminated = bool(info["is_success"])
reward = self.compute_reward(obs["achieved_goal"], obs["desired_goal"], info)
return obs, reward, terminated, truncated, info
def reset(self,
seed: Optional[int] = None,
options: Optional[dict] = None) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:
self.ep_end_goal_distance.append(self.task.last_distance)
obs, info = super().reset(seed=seed, options=options)
info["is_success"] = self.task.is_success(obs["achieved_goal"], obs["desired_goal"])
return obs, info
def get_metrics(self) -> Dict:
return {"avg_terminal_goal_distance": round(np.mean(self.ep_end_goal_distance), 3)}
| 3,056 | 39.223684 | 107 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/env/language.py
|
import os
from typing import Dict, Set, Tuple, Any, Optional
import gymnasium as gym
from gymnasium import spaces
import numpy as np
from lanro_gym.language_utils import Vocabulary, parse_instructions
from lanro_gym.robots import PyBulletRobot
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.core import LanguageTask
from lanro_gym.env_utils import SHAPES, RGBCOLORS, SIZES, WEIGHTS
from lanro_gym.env import BaseEnv
gym.logger.set_level(40)
DEBUG = int("DEBUG" in os.environ and os.environ["DEBUG"])
class LanguageEnv(BaseEnv):
"""
    LanguageEnv is a language-conditioned implementation of `BaseEnv` with a language-specific Gym API.
"""
discovered_word_idxs: Set = set()
def __init__(self, sim: PyBulletSimulation, robot: PyBulletRobot, task: LanguageTask, obs_type: str = "state"):
BaseEnv.__init__(self, sim, robot, task, obs_type)
instruction_list = self.task.get_all_instructions()
if DEBUG:
print("AMOUNT OF INSTRUCTIONS", len(instruction_list))
self.word_list, self.max_instruction_len = parse_instructions(instruction_list)
self.vocab = Vocabulary(self.word_list)
obs, _ = self.reset()
self.compute_reward = self.task.compute_reward
instruction_index_space = spaces.Box(0, len(self.vocab), shape=(self.max_instruction_len, ), dtype=np.uint16)
if self.obs_type == "state":
self.observation_space = spaces.Dict(
dict(
observation=spaces.Box(low=self.obs_low,
high=self.obs_high,
shape=obs["observation"].shape,
dtype=np.float32),
instruction=instruction_index_space,
))
elif self.obs_type == "pixel":
self.observation_space = spaces.Dict(
dict(
observation=spaces.Box(low=0, high=255, shape=obs['observation'].shape, dtype=np.uint8),
instruction=instruction_index_space,
))
def get_vocab_by_properties(self):
vocab_properties = {}
for word in self.vocab.word2idx.keys():
if word in np.concatenate([shape.value[1] for shape in SHAPES]):
vocab_properties[word] = 'shape'
elif word in [shape.name.lower() for shape in RGBCOLORS]:
vocab_properties[word] = 'color'
elif word in np.concatenate([_size.value[1] for _size in SIZES]):
vocab_properties[word] = 'size'
            elif word in np.concatenate([_weight.value[1] for _weight in WEIGHTS]):
                vocab_properties[word] = 'weight'
elif word in self.task.action_verbs:
vocab_properties[word] = 'action'
else:
vocab_properties[word] = 'none'
return vocab_properties
def _get_obs(self) -> Dict[str, np.ndarray]:
robot_obs = self.robot.get_obs()
task_obs = self.task.get_obs()
if self.obs_type == "pixel":
observation = self.robot.get_camera_img().copy()
else:
observation = np.concatenate([robot_obs, task_obs])
if self.sim.render_on:
_ = self.robot.get_camera_img()
current_goal_string = self.task.get_goal()
word_indices = self.encode_instruction(self.pad_instruction(current_goal_string))
return {"observation": observation.copy(), "instruction": word_indices}
def reset(self,
seed: Optional[int] = None,
options: Optional[dict] = None) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:
obs, info = super().reset(seed=seed, options=options)
info["is_success"] = self.task.is_success()
return obs, info
def pad_instruction(self, goal_string) -> str:
_pad_diff = self.max_instruction_len - len(goal_string.split(' '))
        if _pad_diff > 0:
goal_string += ' ' + ' '.join(['<pad>'] * _pad_diff)
return goal_string
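    # Example (editorial): with max_instruction_len = 7, "lift the red block"
    # becomes "lift the red block <pad> <pad> <pad>" before word-index lookup.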
def get_vocab(self) -> Vocabulary:
return self.vocab
def get_max_instruction_len(self) -> int:
return self.max_instruction_len
def encode_instruction(self, instruction: str) -> np.ndarray:
word_indices = [self.vocab.word_to_idx(word) for word in instruction.split(' ')]
return np.array(word_indices)
def decode_instruction(self, instruction_embedding) -> str:
words = [self.vocab.idx_to_word(idx) for idx in instruction_embedding]
return ' '.join(words)
    def step(self, action) -> Tuple[Dict[str, np.ndarray], float, bool, bool, Dict]:
action = np.clip(action, self.action_space.low, self.action_space.high)
self.robot.set_action(action)
self.sim.step()
self.task.return_delayed_action_repair()
obs = self._get_obs()
self.discovered_word_idxs.update(obs['instruction'])
info = {
"is_success": self.task.is_success(),
}
reward = self.compute_reward()
terminated = bool(info["is_success"])
truncated = False
# HI created, add to info dict and terminate episode
if reward == -10.0:
terminated = True
h_instr = self.pad_instruction(self.task.hindsight_instruction)
info['hindsight_instruction_language'] = h_instr
info['hindsight_instruction'] = self.encode_instruction(h_instr)
self.discovered_word_idxs.update(info['hindsight_instruction'])
            # NOTE: reset the reward to the normal punishment, since we want,
            # e.g., NLReach and NLReachHI to behave the same way
reward = -1.0
return obs, reward, terminated, truncated, info
def get_metrics(self) -> Dict[str, Any]:
""" Returns a dict of environment metrics"""
return {
"vocab_discovery_rate": round(len(self.discovered_word_idxs) / (len(self.vocab) - 1), 2),
**self.task.get_task_metrics(),
}
| 6,061 | 40.520548 | 117 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/env/__init__.py
|
from .base import BaseEnv
from .goal import GoalEnv
from .language import LanguageEnv
| 86 | 20.75 | 33 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/nllift.py
|
import numpy as np
from lanro_gym.robots import PyBulletRobot
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.core import LanguageTask
from lanro_gym.language_utils import create_commands
class NLLift(LanguageTask):
def __init__(self,
sim: PyBulletSimulation,
robot: PyBulletRobot,
obj_xy_range: float = 0.3,
num_obj: int = 2,
min_goal_height: float = 0.0,
max_goal_height: float = 0.1,
use_hindsight_instructions: bool = False,
use_action_repair: bool = False,
delay_action_repair: bool = False,
use_negations_action_repair: bool = False,
use_synonyms: bool = False,
mode: str = 'Color'):
super().__init__(sim, robot, mode, use_hindsight_instructions, use_action_repair, delay_action_repair,
use_negations_action_repair, num_obj, use_synonyms)
self.max_goal_height = max_goal_height
self.min_goal_height = min_goal_height
self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
self.action_verbs = ["lift", "raise", "hoist"]
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer()
def reset(self) -> None:
self.sample_task_objects()
for idx, obj_pos in zip(self.obj_indices_selection, self._sample_objects()):
self.sim.set_base_pose(f"object{idx}", obj_pos, [0, 0, 0, 1])
self._sample_goal()
self.ep_height_threshold = self.np_random.uniform(low=self.min_goal_height, high=self.max_goal_height)
# similar to the pick and place task, the goal height is 0 at least 30% of the time
if self.np_random.random() < 0.3:
self.ep_height_threshold = 0
self.reset_hi_and_ar()
def grasped_and_lifted(self, obj_body_key):
obj_pos = np.array(self.sim.get_base_position(obj_body_key))
hit_obj_id = self.robot.gripper_ray_obs()[0]
obj_id = self.sim.get_object_id(obj_body_key)
all_fingers_have_contact = np.all(self.get_contact_with_fingers(obj_body_key))
achieved_min_height = obj_pos[-1] > self.ep_height_threshold
inside_gripper = hit_obj_id == obj_id
return all_fingers_have_contact and achieved_min_height and inside_gripper
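    # Success therefore requires three cues at once: contact on both fingers,
    # the object's z above the per-episode height threshold, and the gripper
    # ray cast hitting the object's body id (suggesting the object actually
    # sits inside the gripper).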
def is_success(self):
return self.grasped_and_lifted(self.goal_object_body_key)
def compute_reward(self) -> float:
if self.is_success():
return self.generate_action_repair_or_success()
elif self.ep_hindsight_instruction and not self.ep_hindsight_instruction_returned:
for other_object_idx in self.non_goal_body_indices:
# if grasped with both fingers and being at a certain height
if self.grasped_and_lifted(f"object{other_object_idx}"):
self.generate_hindsight_instruction(other_object_idx)
return -10.
elif self.ep_action_repair and not self.ep_action_repair_returned:
for other_object_idx in self.non_goal_body_indices:
# if grasped with both fingers and being at a certain height
if self.grasped_and_lifted(f"object{other_object_idx}"):
if self.use_negations_action_repair and self.np_random.random() < 0.5:
# action correction with negation
# "lift the red object" -> lifts green object -> correction "no not the green object"
target_property_tuple = self.task_object_list.objects[other_object_idx].get_properties()
repair_commands = create_commands("negation",
target_property_tuple,
use_synonyms=self.use_synonyms)
else:
# additional feedback for the goal object, lifting a wrong object
# "lift the red block" -> *lifts green block* -> "the red block!"
target_property_tuple = self.task_object_list.objects[self.goal_obj_idx].get_properties()
repair_commands = create_commands("repair",
target_property_tuple,
use_synonyms=self.use_synonyms)
self.merge_instruction_action_repair(repair_commands)
return -1.0
return -1.0
| 4,735 | 53.436782 | 113 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/nlgrasp.py
|
from lanro_gym.robots import PyBulletRobot
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.nllift import NLLift
class NLGrasp(NLLift):
def __init__(self,
sim: PyBulletSimulation,
robot: PyBulletRobot,
obj_xy_range: float = 0.3,
num_obj: int = 2,
min_goal_height: float = 0.0,
max_goal_height: float = 0.01,
use_hindsight_instructions: bool = False,
use_action_repair: bool = False,
delay_action_repair: bool = False,
use_negations_action_repair: bool = False,
use_synonyms: bool = False,
mode: str = 'Color'):
super().__init__(sim,
robot,
obj_xy_range=obj_xy_range,
num_obj=num_obj,
min_goal_height=min_goal_height,
max_goal_height=max_goal_height,
use_hindsight_instructions=use_hindsight_instructions,
use_action_repair=use_action_repair,
delay_action_repair=delay_action_repair,
use_negations_action_repair=use_negations_action_repair,
use_synonyms=use_synonyms,
mode=mode)
self.action_verbs = ["grasp", "grip", "grab"]
| 1,451 | 40.485714 | 81 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/push.py
|
import numpy as np
from lanro_gym.tasks.core import Task
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.scene import basic_scene
from lanro_gym.env_utils import RGBCOLORS
class Push(Task):
def __init__(
self,
sim: PyBulletSimulation,
reward_type: str = "sparse",
distance_threshold: float = 0.05,
goal_xy_range: float = 0.3,
obj_xy_range: float = 0.3,
):
self.sim = sim
self.reward_type = reward_type
self.distance_threshold = distance_threshold
self.goal_range_low = np.array([-goal_xy_range / 2, -goal_xy_range / 2, 0])
self.goal_range_high = np.array([goal_xy_range / 2, goal_xy_range / 2, 0])
self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer()
def _create_scene(self) -> None:
basic_scene(self.sim)
self.sim.create_box(
body_name="object",
half_extents=[
self.object_size / 2,
self.object_size / 2,
self.object_size / 2,
],
mass=2.0,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=RGBCOLORS.RED.value[0] + [1],
)
self.sim.create_box(
body_name="target",
half_extents=[
self.object_size / 2,
self.object_size / 2,
self.object_size / 2,
],
mass=0.0,
ghost=True,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=RGBCOLORS.RED.value[0] + [0.3],
)
def get_obs(self) -> np.ndarray:
obj_key = "object"
# position, rotation of the object
object_position = np.array(self.sim.get_base_position(obj_key))
object_rotation = np.array(self.sim.get_base_rotation(obj_key))
object_velocity = np.array(self.sim.get_base_velocity(obj_key))
object_angular_velocity = np.array(self.sim.get_base_angular_velocity(obj_key))
observation = np.concatenate([
object_position,
object_rotation,
object_velocity,
object_angular_velocity,
])
return observation
def get_achieved_goal(self) -> np.ndarray:
object_position = np.array(self.sim.get_base_position("object"))
return object_position.copy()
def reset(self) -> None:
self.goal = self._sample_goal()
object_position = self._sample_object()
self.sim.set_base_pose("target", self.goal.tolist(), [0, 0, 0, 1])
self.sim.set_base_pose("object", object_position.tolist(), [0, 0, 0, 1])
def _sample_goal(self) -> np.ndarray:
"""Randomize goal."""
goal = [0.0, 0.0, self.object_size / 2] # z offset for the cube center
noise = self.np_random.uniform(self.goal_range_low, self.goal_range_high)
goal += noise
return goal.copy()
def _sample_object(self) -> np.ndarray:
"""Randomize start position of object."""
object_position = [0.0, 0.0, self.object_size / 2]
noise = self.np_random.uniform(self.obj_range_low, self.obj_range_high)
object_position += noise
return object_position.copy()
| 3,437 | 35.574468 | 87 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/__init__.py
|
# goal-conditioned tasks
from .reach import Reach
from .push import Push
from .stack import Stack
from .slide import Slide
# language-conditioned tasks
from .nlreach import NLReach
from .nlpush import NLPush
from .nllift import NLLift
from .nlgrasp import NLGrasp
| 265 | 21.166667 | 28 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/stack.py
|
import numpy as np
from typing import Tuple
from lanro_gym.tasks.core import Task
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.scene import basic_scene
from lanro_gym.env_utils import RGBCOLORS
class Stack(Task):
def __init__(
self,
sim: PyBulletSimulation,
reward_type: str = "sparse",
distance_threshold: float = 0.05,
goal_xy_range: float = 0.3,
obj_xy_range: float = 0.3,
goal_z_range: float = 0.0,
num_obj: int = 2,
):
self.sim = sim
self.reward_type = reward_type
self.distance_threshold = distance_threshold
self.goal_z_range = goal_z_range
self.goal_range_low = np.array([-goal_xy_range / 2, -goal_xy_range / 2, 0])
self.goal_range_high = np.array([goal_xy_range / 2, goal_xy_range / 2, goal_z_range])
self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
assert num_obj <= 4, "Maximum number of objects is 4"
self.obj_colors = [RGBCOLORS.RED, RGBCOLORS.GREEN, RGBCOLORS.BLUE, RGBCOLORS.YELLOW][:num_obj]
self.num_obj = num_obj
self.goal_offsets = [1, 3, 5, 7]
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer()
def _create_scene(self):
basic_scene(self.sim)
for idx, obj_color in enumerate(self.obj_colors):
self.sim.create_box(
body_name=f"object{idx}",
half_extents=[
self.object_size / 2,
self.object_size / 2,
self.object_size / 2,
],
mass=2.0,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=obj_color.value[0] + [1],
)
self.sim.create_sphere(
body_name=f"target{idx}",
radius=self.object_size / 2,
mass=0.0,
ghost=True,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=obj_color.value[0] + [0.3],
)
def get_obs(self) -> np.ndarray:
observation = []
for idx in range(self.num_obj):
obj_key = f"object{idx}"
# position, rotation of the object
object_position = np.array(self.sim.get_base_position(obj_key))
object_rotation = np.array(self.sim.get_base_rotation(obj_key))
object_velocity = np.array(self.sim.get_base_velocity(obj_key))
object_angular_velocity = np.array(self.sim.get_base_angular_velocity(obj_key))
observation.extend([object_position, object_rotation, object_velocity, object_angular_velocity])
return np.concatenate(observation)
def get_achieved_goal(self) -> np.ndarray:
achieved_goals = [self.sim.get_base_position(f"object{idx}") for idx in range(self.num_obj)]
return np.concatenate(achieved_goals).copy()
def _sample_objects(self) -> Tuple:
obj_positions = [[0.0, 0.0, self.object_size / 2] +
self.np_random.uniform(self.obj_range_low, self.obj_range_high)
for _ in range(len(self.obj_colors))]
return tuple(obj_positions)
def reset(self) -> None:
self.goal = self._sample_goal()
for idx, obj_pos in enumerate(self._sample_objects()):
self.sim.set_base_pose(f"target{idx}", self.goal[3 * idx:3 * (idx + 1)].tolist(), [0, 0, 0, 1])
self.sim.set_base_pose(f"object{idx}", obj_pos.tolist(), [0, 0, 0, 1])
def _sample_goal(self) -> np.ndarray:
goal = []
goal_noise = self.np_random.uniform(self.goal_range_low, self.goal_range_high)
for idx in range(self.num_obj):
# if we have the first object and use goal_z_range, assign a goal
# height of 0.0 at least 30% of the time
if idx == 0 and self.goal_z_range > 0 and self.np_random.random() < 0.3:
                goal_noise[2] = 0.0    # zero the z noise so the first goal rests on the table
goal.append([0.0, 0.0, self.goal_offsets[idx] * self.object_size / 2] +
goal_noise) # with z offset factor for stacking goals
return np.concatenate((goal)).copy()
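    # Worked example (editorial): with object_size = 0.04, the goal_offsets
    # [1, 3, 5, 7] place target centers at z = 0.02, 0.06, 0.10 and 0.14
    # (plus shared noise), i.e. a tower of cubes stacked on the table.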
| 4,320 | 41.362745 | 108 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/reach.py
|
import numpy as np
from typing import Callable
from lanro_gym.tasks.core import Task
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.scene import basic_scene
from lanro_gym.env_utils import RGBCOLORS
class Reach(Task):
def __init__(self,
sim: PyBulletSimulation,
get_ee_position: Callable[[], np.ndarray],
reward_type: str = "sparse",
distance_threshold: float = 0.025,
goal_range: float = 0.3):
self.sim = sim
self.reward_type = reward_type
self.distance_threshold = distance_threshold
self.get_ee_position = get_ee_position
self.goal_range_low = np.array([-goal_range / 2, -goal_range / 2, 0])
self.goal_range_high = np.array([goal_range / 2, goal_range / 2, goal_range])
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer()
def _create_scene(self) -> None:
basic_scene(self.sim)
self.sim.create_sphere(
body_name="target",
radius=self.distance_threshold,
mass=0.0,
ghost=True,
position=[0.0, 0.0, 0.0],
rgba_color=RGBCOLORS.RED.value[0] + [0.3],
)
def get_obs(self) -> np.ndarray:
return np.array([])
def get_achieved_goal(self) -> np.ndarray:
return self.get_ee_position()
def reset(self) -> None:
self.goal = self._sample_goal()
self.sim.set_base_pose("target", self.goal.tolist(), [0, 0, 0, 1])
def _sample_goal(self) -> np.ndarray:
return self.np_random.uniform(self.goal_range_low, self.goal_range_high)
| 1,696 | 32.94 | 85 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/slide.py
|
import numpy as np
from lanro_gym.tasks.core import Task
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.scene import basic_scene
from lanro_gym.env_utils import RGBCOLORS
SLIDE_OBJ_SIZE: float = 0.06
class Slide(Task):
def __init__(
self,
sim: PyBulletSimulation,
reward_type: str = "sparse",
distance_threshold: float = 0.05,
goal_xy_range: float = 0.3,
goal_x_offset: float = 0.4,
obj_xy_range: float = 0.3,
):
self.sim = sim
self.reward_type = reward_type
self.distance_threshold = distance_threshold
self.object_size = SLIDE_OBJ_SIZE
self.goal_range_low = np.array([-goal_xy_range / 2 + goal_x_offset, -goal_xy_range / 2, 0])
self.goal_range_high = np.array([goal_xy_range / 2 + goal_x_offset, goal_xy_range / 2, 0])
self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer()
def _create_scene(self) -> None:
basic_scene(self.sim, table_length=1.2, table_x_offset=0.1, plane_x_pos=0., plane_length=1)
self.sim.create_cylinder(
body_name="object",
mass=2.0,
radius=self.object_size / 2,
height=self.object_size / 2,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=RGBCOLORS.RED.value[0] + [1],
lateral_friction=0.1,
spinning_friction=0.005,
)
self.sim.create_cylinder(
body_name="target",
mass=0.0,
ghost=True,
radius=self.object_size / 2,
height=self.object_size / 2,
position=[0.0, 0.0, self.object_size / 2],
rgba_color=RGBCOLORS.RED.value[0] + [0.3],
)
def get_obs(self) -> np.ndarray:
obj_key = "object"
# position, rotation of the object
object_position = np.array(self.sim.get_base_position(obj_key))
object_rotation = np.array(self.sim.get_base_rotation(obj_key))
object_velocity = np.array(self.sim.get_base_velocity(obj_key))
object_angular_velocity = np.array(self.sim.get_base_angular_velocity(obj_key))
observation = np.concatenate([
object_position,
object_rotation,
object_velocity,
object_angular_velocity,
])
return observation
def get_achieved_goal(self) -> np.ndarray:
object_position = np.array(self.sim.get_base_position("object"))
return object_position.copy()
def reset(self) -> None:
self.goal = self._sample_goal()
object_position = self._sample_object()
self.sim.set_base_pose("target", self.goal.tolist(), [0, 0, 0, 1])
self.sim.set_base_pose("object", object_position.tolist(), [0, 0, 0, 1])
def _sample_goal(self) -> np.ndarray:
"""Randomize goal."""
goal = [0.0, 0.0, self.object_size / 2]
noise = self.np_random.uniform(self.goal_range_low, self.goal_range_high)
goal += noise
return goal.copy()
def _sample_object(self) -> np.ndarray:
"""Randomize start position of object."""
object_position = [0.0, 0.0, self.object_size / 2]
noise = self.np_random.uniform(self.obj_range_low, self.obj_range_high)
object_position += noise
return object_position.copy()
| 3,549 | 37.172043 | 99 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/nlreach.py
|
from lanro_gym.robots import PyBulletRobot
import numpy as np
from lanro_gym.tasks.core import LanguageTask
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.utils import goal_distance
from lanro_gym.language_utils import create_commands
class NLReach(LanguageTask):
def __init__(self,
sim: PyBulletSimulation,
robot: PyBulletRobot,
obj_xy_range: float = 0.3,
num_obj: int = 2,
use_hindsight_instructions: bool = False,
use_action_repair: bool = False,
delay_action_repair: bool = False,
use_negations_action_repair: bool = False,
use_synonyms: bool = False,
mode: str = 'Color'):
super().__init__(sim, robot, mode, use_hindsight_instructions, use_action_repair, delay_action_repair,
use_negations_action_repair, num_obj, use_synonyms)
self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
self.action_verbs = ["touch", "reach", "contact"]
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer()
def reset(self) -> None:
self.sample_task_objects()
self.obj_init_pos = self._sample_objects()
for idx, obj_pos in zip(self.obj_indices_selection, self.obj_init_pos):
self.sim.set_base_pose(f"object{idx}", obj_pos.tolist(), [0, 0, 0, 1])
self._sample_goal()
self.reset_hi_and_ar()
def is_success(self):
# NOTE: objects should stay in place with maximum positional change \eta to initial position
current_obj_pos = np.concatenate(
[np.array(self.sim.get_base_position(f"object{idx}")) for idx in self.obj_indices_selection])
close_to_init_pos = goal_distance(np.concatenate(self.obj_init_pos), current_obj_pos) < 0.025
        # check if the correct object was touched
return np.any(self.get_contact_with_fingers(self.goal_object_body_key)) and close_to_init_pos
def compute_reward(self) -> float:
if self.is_success():
return self.generate_action_repair_or_success()
elif self.ep_hindsight_instruction and not self.ep_hindsight_instruction_returned:
for other_object_idx in self.non_goal_body_indices:
_non_goal_body = f"object{other_object_idx}"
# if touched with at least one finger
if np.any(self.get_contact_with_fingers(_non_goal_body)):
self.generate_hindsight_instruction(other_object_idx)
return -10.
elif self.ep_action_repair and not self.ep_action_repair_returned:
for other_object_idx in self.non_goal_body_indices:
_non_goal_body = f"object{other_object_idx}"
# if touched with at least one finger
if np.any(self.get_contact_with_fingers(_non_goal_body)):
if self.use_negations_action_repair and self.np_random.random() < 0.5:
# action correction with negation
# "reach the red object" -> reaches green object -> correction "no not the green object"
target_property_tuple = self.task_object_list.objects[other_object_idx].get_properties()
repair_commands = create_commands("negation",
target_property_tuple,
use_synonyms=self.use_synonyms)
else:
# additional feedback for the goal object, touching a wrong object
# "touch the red block" -> *touches green block* -> "the red block!"
target_property_tuple = self.task_object_list.objects[self.goal_obj_idx].get_properties()
repair_commands = create_commands("repair",
target_property_tuple,
use_synonyms=self.use_synonyms)
self.merge_instruction_action_repair(repair_commands)
return -1.0
return -1.0
| 4,365 | 54.265823 | 113 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/scene.py
|
from lanro_gym.simulation import PyBulletSimulation
PLANE_COLOR = [0.37, 0.37, 0.37, 1]
TABLE_COLOR = [0.95, 0.95, 0.95, 1]
def basic_scene(
sim: PyBulletSimulation,
plane_z_offset: float = -0.4,
plane_x_pos: float = -0.2,
plane_length: float = 0.8,
table_length: float = 0.8,
table_width: float = 0.8,
table_height: float = 0.4,
table_x_offset: float = -0.1,
table_z_offset: float = 0.0,
table_friction: float = 1.0,
table_spinning_friction: float = 0.005,
):
sim.create_box(
body_name="plane",
half_extents=[plane_length, 0.8, 0.01],
mass=0,
position=[plane_x_pos, 0.0, plane_z_offset - 0.01],
specular_color=[0.0, 0.0, 0.0],
rgba_color=PLANE_COLOR,
)
sim.create_box(
body_name="table",
half_extents=[table_length / 2, table_width / 2, table_height / 2],
mass=0,
position=[table_x_offset, 0.0, -table_height / 2 + table_z_offset],
specular_color=[0.0, 0.0, 0.0],
rgba_color=TABLE_COLOR,
lateral_friction=table_friction,
spinning_friction=table_spinning_friction,
)
sim.create_box(
body_name="robot_platform",
half_extents=[0.175, 0.175, 0.2],
mass=0,
position=[-0.675, 0.0, -0.2],
specular_color=[0.0, 0.0, 0.0],
rgba_color=TABLE_COLOR,
lateral_friction=table_friction,
)
| 1,413 | 29.085106 | 75 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/nlpush.py
|
import numpy as np
from lanro_gym.robots import PyBulletRobot
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.core import LanguageTask
from lanro_gym.utils import goal_distance
from lanro_gym.language_utils import create_commands
class NLPush(LanguageTask):
def __init__(self,
sim: PyBulletSimulation,
robot: PyBulletRobot,
obj_xy_range: float = 0.3,
num_obj: int = 2,
use_hindsight_instructions: bool = False,
use_action_repair: bool = False,
delay_action_repair: bool = False,
use_negations_action_repair: bool = False,
use_synonyms: bool = False,
mode: str = 'Color'):
super().__init__(sim, robot, mode, use_hindsight_instructions, use_action_repair, delay_action_repair,
use_negations_action_repair, num_obj, use_synonyms)
self.min_push_distance = 0.025
self.max_push_distance = 0.075
self.max_height_change = self.object_size
self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
self.action_verbs = ["push", "move", "shift"]
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer()
def show_goal_boundary(self):
if self.sim.render_on:
self.sim.remove_body('target')
target_pos = self.obj_init_pos_dict[self.goal_object_body_key]
target_pos[-1] = 0
self.sim.create_cylinder(
body_name="target",
mass=0.0,
ghost=True,
radius=self.ep_push_distance,
height=0.001,
position=target_pos,
rgba_color=self.task_object_list.objects[self.goal_obj_idx].get_color().value[0] + [0.3],
)
def reset(self) -> None:
self.obj_init_pos_dict = {}
self.sample_task_objects()
self.obj_init_pos = self._sample_objects()
for idx, obj_pos in zip(self.obj_indices_selection, self.obj_init_pos):
obj_key = f"object{idx}"
self.sim.set_base_pose(obj_key, obj_pos.tolist(), [0, 0, 0, 1])
self.obj_init_pos_dict[obj_key] = np.array(obj_pos)
self._sample_goal()
self.ep_push_distance = self.np_random.uniform(low=self.min_push_distance, high=self.max_push_distance)
# visualize goal region
self.show_goal_boundary()
self.reset_hi_and_ar()
def is_success(self):
init_goal_pos = self.obj_init_pos_dict[self.goal_object_body_key]
current_goal_pos = np.array(self.sim.get_base_position(self.goal_object_body_key))
return self.detect_push_motion(init_goal_pos, current_goal_pos)
    def detect_push_motion(self, initial_pos, current_pos) -> bool:
        assert len(initial_pos) == 3
        assert len(current_pos) == 3
        change_xy = goal_distance(initial_pos[:2], current_pos[:2])
        change_z = goal_distance(initial_pos[-1:], current_pos[-1:])
        # allow change only in x and y, and keep z as close as possible;
        # this should prevent lifting the object or throwing it to the ground
        return change_xy > self.ep_push_distance and change_z < self.max_height_change
def moved_other_object(self):
for other_object_idx in self.non_goal_body_indices:
_non_goal_body = f"object{other_object_idx}"
init_obj_pos = self.obj_init_pos_dict[_non_goal_body]
current_obj_pos = np.array(self.sim.get_base_position(_non_goal_body))
if self.detect_push_motion(init_obj_pos, current_obj_pos):
return other_object_idx
        return None
def compute_reward(self) -> float:
if self.is_success():
_result = self.generate_action_repair_or_success()
# update visualization of goal region
self.show_goal_boundary()
return _result
elif self.ep_hindsight_instruction and not self.ep_hindsight_instruction_returned:
other_object_idx = self.moved_other_object()
            if other_object_idx is not None:
self.generate_hindsight_instruction(other_object_idx)
return -10.
elif self.ep_action_repair and not self.ep_action_repair_returned:
other_object_idx = self.moved_other_object()
            if other_object_idx is not None:
if self.use_negations_action_repair and self.np_random.random() < 0.5:
# action correction with negation
# "push the red object" -> pushes green object -> correction "no not the green object"
target_property_tuple = self.task_object_list.objects[other_object_idx].get_properties()
repair_commands = create_commands("negation", target_property_tuple, use_synonyms=self.use_synonyms)
else:
# additional feedback for the goal object, pushing a wrong object
# "push the red block" -> *pushes green block* -> "the red block!"
target_property_tuple = self.task_object_list.objects[self.goal_obj_idx].get_properties()
repair_commands = create_commands("repair", target_property_tuple, use_synonyms=self.use_synonyms)
self.show_goal_boundary()
self.merge_instruction_action_repair(repair_commands)
return -1.0
return -1.0
| 5,625 | 47.921739 | 120 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/core/task.py
|
from typing import Dict
import numpy as np
from lanro_gym.utils import goal_distance
class Task:
distance_threshold: float = 0.05
reward_type: str = "sparse"
last_distance: float = 0
object_size: float = 0.04
goal: np.ndarray
np_random: np.random.Generator
def get_goal(self) -> np.ndarray:
"""Return the current goal."""
return self.goal.copy()
def get_obs(self):
"""Return the observation associated to the task."""
raise NotImplementedError
def get_achieved_goal(self):
"""Return the achieved goal."""
raise NotImplementedError
def reset(self):
"""Reset the task"""
raise NotImplementedError
def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> float:
distance = goal_distance(achieved_goal, desired_goal)
return float(distance < self.distance_threshold)
def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info) -> float:
distance = goal_distance(achieved_goal, desired_goal)
self.last_distance = distance
if self.reward_type == "sparse":
return -(distance > self.distance_threshold).astype(np.float32)
else:
return -distance
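    # Sparse rewards are therefore -1.0 outside the distance threshold and
    # 0.0 (-0.0) inside it, while the dense variant returns the negative
    # goal distance.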
def get_task_metrics(self) -> Dict:
return {}
| 1,331 | 29.272727 | 97 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/core/__init__.py
|
from .task import Task
from .language_task import LanguageTask
| 63 | 20.333333 | 39 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/tasks/core/language_task.py
|
import itertools
from typing import Dict, List, Tuple
import numpy as np
from lanro_gym.robots.pybrobot import PyBulletRobot
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.tasks.scene import basic_scene
from lanro_gym.env_utils import RGBCOLORS, TaskObjectList, SHAPES, WEIGHTS, SIZES, valid_task_object_combination, distinguishable_by_primary_or_secondary
from lanro_gym.language_utils import create_commands, word_in_string
import itertools
class LanguageTask:
distance_threshold: float = 0.05
object_size: float = 0.04
current_instruction: np.ndarray
action_verbs: List = []
secondary_props_words: List = []
obj_range_low: np.ndarray
obj_range_high: np.ndarray
task_object_list: TaskObjectList
num_obj: int = 0
np_random: np.random.Generator
# hindsight instruction flags and metrics
hindsight_instruction = None
use_hindsight_instructions: bool = False
total_hindsight_instruction_episodes: int = 0
discovered_hindsight_instruction_ctr: int = 0
# action repair flags and metrics
use_action_repair: bool = False
use_negations_action_repair: bool = False
ep_action_repair_returned: bool = False
action_repair_success_ctr: int = 0
action_repair_ctr: int = 0
total_action_repair_episodes: int = 0
# pybullet user debug text ids
instruction_sim_id = 43
action_repair_sim_id = 44
def __init__(self,
sim: PyBulletSimulation,
robot: PyBulletRobot,
mode: str,
use_hindsight_instructions: bool,
use_action_repair: bool,
delay_action_repair: bool,
use_negations_action_repair: bool,
num_obj: int,
use_synonyms: bool = False):
self.sim = sim
self.robot = robot
self.use_hindsight_instructions = use_hindsight_instructions
self.use_action_repair = use_action_repair
self.use_negations_action_repair = use_negations_action_repair
self.num_obj = num_obj
self.mode = mode
self.use_synonyms = use_synonyms
self.delay_action_repair = delay_action_repair
self.ep_delayed_ar_command = None
_args = dict(
color_mode='color' in mode,
shape_mode='shape' in mode,
size_mode='size' in mode,
weight_mode='weight' in mode,
)
self.task_object_list = TaskObjectList(sim, **_args)
self.object_properties = self.task_object_list.get_obj_properties()
def get_goal(self) -> str:
""" Get the goal as instruction string """
return self.current_instruction.item()
def reset(self):
"""Reset the task: sample a new goal"""
raise NotImplementedError
def is_success(self) -> float:
raise NotImplementedError
def compute_reward(self) -> float:
raise NotImplementedError
def _create_scene(self) -> None:
basic_scene(self.sim)
def get_all_instructions(self) -> List[str]:
instruction_set = np.concatenate([
create_commands("instruction",
_property_tuple,
action_verbs=self.action_verbs,
use_synonyms=self.use_synonyms) for _property_tuple in self.object_properties
])
if self.use_action_repair:
action_repair_set = np.concatenate([
create_commands("repair", _property_tuple, use_synonyms=self.use_synonyms)
for _property_tuple in self.object_properties
])
negations = np.concatenate([
create_commands("negation", _property_tuple, use_synonyms=self.use_synonyms)
for _property_tuple in self.object_properties
]) if self.use_negations_action_repair else []
action_repair_set = np.concatenate([action_repair_set, negations])
# combine each instruction with each action repair command
            # NOTE: concatenating two huge arrays can exhaust memory and hit swap,
            # so we use a for loop, which is slow but works
instruction_action_repair_set = [
_instr + ' ' + _ar for _instr, _ar in itertools.product(instruction_set, action_repair_set)
]
instruction_set = np.concatenate((instruction_set, instruction_action_repair_set))
return list(set(instruction_set))
def get_instructions_by_properties(self):
inst_properties = {}
inst_set = self.get_all_instructions()
for inst in inst_set:
if inst not in inst_properties.keys():
inst_properties[inst] = {"color": '', "shape": '', "weight": '', "size": ''}
for obj_prop in np.concatenate([RGBCOLORS, SHAPES, WEIGHTS, SIZES]):
if isinstance(obj_prop, RGBCOLORS):
inst_word = word_in_string(inst, [obj_prop.name.lower()])
if inst_word:
inst_properties[inst]["color"] = inst_word
if isinstance(obj_prop, SHAPES):
inst_word = word_in_string(inst, obj_prop.value[1])
if inst_word:
inst_properties[inst]["shape"] = inst_word
if isinstance(obj_prop, WEIGHTS):
inst_word = word_in_string(inst, obj_prop.value[1])
if inst_word:
inst_properties[inst]["weight"] = inst_word
if isinstance(obj_prop, SIZES):
inst_word = word_in_string(inst, obj_prop.value[1])
if inst_word:
inst_properties[inst]["size"] = inst_word
return inst_properties
def get_obs(self) -> np.ndarray:
observation = []
for idx in self.obj_indices_selection:
obj_key = f"object{idx}"
object_position = np.array(self.sim.get_base_position(obj_key))
object_rotation = np.array(self.sim.get_base_rotation(obj_key))
object_velocity = np.array(self.sim.get_base_velocity(obj_key))
object_angular_velocity = np.array(self.sim.get_base_angular_velocity(obj_key))
object_identifier = self.task_object_list.objects[idx].get_onehot()
observation.extend(
[object_position, object_rotation, object_velocity, object_angular_velocity, object_identifier])
return np.concatenate(observation)
def get_contact_with_fingers(self, target_body) -> List:
# check contact with fingers defined by ee_joints
finger_contacts = [
bool(self.sim.get_contact_points(target_body, self.robot.body_name, linkIndexB=finger_idx))
# assume the first two indices are the fingers of the end effector
for finger_idx in self.robot.ee_joints[:2]
]
return finger_contacts
def _sample_goal(self) -> None:
"""Randomly select one of the generated instructions for the current goal object"""
property_tuple = self.task_object_list.objects[self.goal_obj_idx].get_properties()
sentences = create_commands("instruction",
property_tuple,
action_verbs=self.action_verbs,
use_synonyms=self.use_synonyms)
self.current_instruction = self.np_random.choice(sentences, 1)
self.sim.bclient.addUserDebugText(self.get_goal(), [0.05, -.3, .4],
textSize=2.0,
replaceItemUniqueId=self.instruction_sim_id)
def _sample_objects(self) -> Tuple:
"""Randomize start position of objects."""
while True:
obj_positions = [[0.0, 0.0, self.object_size / 2] +
self.np_random.uniform(self.obj_range_low, self.obj_range_high)
for _ in range(self.num_obj)]
unique_distance_combinations = [np.linalg.norm(a - b) for a, b in itertools.combinations(obj_positions, 2)]
            # only accept the sample if the minimal distance between two objects
            # is greater than three times the object size, as objects should not
            # be on top of each other and we want a minimal gap between them
if np.min(unique_distance_combinations) > self.object_size * 3:
return tuple(obj_positions)
def is_unique_obj_selection(self, obj_list):
for obj_tuple in itertools.combinations(obj_list, 2):
obj1, obj2 = obj_tuple
if not distinguishable_by_primary_or_secondary(obj1, obj2):
return False
return True
def sample_task_objects(self):
# remove old objects, as we do not want to destroy the whole simulation
remove_obj_keys = [key for key in self.sim._bodies_idx.keys() if 'object' in key]
for _key in remove_obj_keys:
self.sim.remove_body(_key)
# Ensure we only have duplicates along one feature dimension
while True:
self.obj_indices_selection = self.np_random.choice(len(self.task_object_list),
size=self.num_obj,
replace=False)
self.goal_obj_idx = self.np_random.choice(self.obj_indices_selection, 1)[0]
self.non_goal_body_indices = [idx for idx in self.obj_indices_selection if idx != self.goal_obj_idx]
valid_combination = np.all([
valid_task_object_combination(self.task_object_list[self.goal_obj_idx],
self.task_object_list[non_goal_idx])
for non_goal_idx in self.non_goal_body_indices
])
if self.use_action_repair:
unique_obj_selection = self.is_unique_obj_selection(
[self.task_object_list[self.goal_obj_idx]] +
[self.task_object_list[non_goal_idx] for non_goal_idx in self.non_goal_body_indices])
if valid_combination and unique_obj_selection:
break
else:
if valid_combination:
break
self.goal_object_body_key = f"object{self.goal_obj_idx}"
for obj_idx in self.obj_indices_selection:
object_body_key = f"object{obj_idx}"
self.task_object_list.objects[obj_idx].load(object_body_key)
def return_delayed_action_repair(self):
if self.ep_action_repair and self.delay_action_repair and self.ep_delayed_ar_command is not None:
if self._delay_ctr == 0:
self.action_repair_ctr += 1
action_repair_command = self.ep_delayed_ar_command
self.sim.bclient.addUserDebugText(self.get_goal(), [0.05, -.3, .4],
textSize=2.0,
replaceItemUniqueId=self.instruction_sim_id)
self.sim.bclient.addUserDebugText(action_repair_command, [0.05, -.3, .35],
textSize=2.0,
textColorRGB=RGBCOLORS.ORANGE.value[0],
replaceItemUniqueId=self.action_repair_sim_id)
self.current_instruction = np.array([self.get_goal() + ' ' + action_repair_command])
self.ep_delayed_ar_command = None
else:
self._delay_ctr -= 1
def merge_instruction_action_repair(self, repair_commands):
action_repair_command = self.np_random.choice(repair_commands, 1)[0]
if self.delay_action_repair and self.ep_delayed_ar_command is None:
self.ep_delayed_ar_command = action_repair_command
self._delay_ctr = self.np_random.integers(0, 20)
else:
self.action_repair_ctr += 1
self.sim.bclient.addUserDebugText(self.get_goal(), [0.05, -.3, .4],
textSize=2.0,
replaceItemUniqueId=self.instruction_sim_id)
self.sim.bclient.addUserDebugText(action_repair_command, [0.05, -.3, .35],
textSize=2.0,
textColorRGB=RGBCOLORS.ORANGE.value[0],
replaceItemUniqueId=self.action_repair_sim_id)
self.current_instruction = np.array([self.get_goal() + ' ' + action_repair_command])
# we only want one action repair command per episode
self.ep_action_repair_returned = True
def generate_action_repair_or_success(self) -> float:
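        """Swap the goal to trigger one action repair per episode; return -1.0 when repairing, else 0.0."""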
if self.ep_action_repair and not self.ep_action_repair_returned:
# Simulate false instruction:
# randomly select another object as new goal
self.goal_obj_idx = self.np_random.choice(self.non_goal_body_indices, 1)[0]
self.non_goal_body_indices = [idx for idx in self.obj_indices_selection if idx != self.goal_obj_idx]
self.goal_object_body_key = f"object{self.goal_obj_idx}"
target_property_tuple = self.task_object_list.objects[self.goal_obj_idx].get_properties()
# generate action repair command considering new goal
repair_commands = create_commands("repair", target_property_tuple, use_synonyms=self.use_synonyms)
self.merge_instruction_action_repair(repair_commands)
return -1.0
else:
            # if action repair was already triggered and is now successful with the new goal (count only once per episode)
if self.ep_action_repair_returned and not self.ep_action_repair_success:
# AR of this episode successful
self.action_repair_success_ctr += 1
self.ep_action_repair_success = True
return 0.0
def generate_hindsight_instruction(self, _obj_idx):
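        """Create a hindsight instruction describing the object at _obj_idx."""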
property_tuple = self.task_object_list.objects[_obj_idx].get_properties()
hindsight_sentences = create_commands("instruction",
property_tuple,
action_verbs=self.action_verbs,
use_synonyms=self.use_synonyms)
self.discovered_hindsight_instruction_ctr += 1
self.ep_hindsight_instruction_returned = True
self.hindsight_instruction = self.np_random.choice(hindsight_sentences, 1)[0]
def reset_hi_and_ar(self):
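        """Reset per-episode HI/AR state and sample which mode, if any, this episode uses."""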
self.sim.bclient.removeUserDebugItem(self.action_repair_sim_id)
self.ep_hindsight_instruction = False
self.ep_hindsight_instruction_returned = False
self.ep_action_repair = False
self.ep_action_repair_returned = False
self.ep_action_repair_success = False
self.ep_delayed_ar_command = None
# if both options are true, select one of them randomly
if self.use_hindsight_instructions and self.use_action_repair:
if self.np_random.random() < 0.5:
self.ep_hindsight_instruction = self.np_random.random() < 0.5
else:
self.ep_action_repair = self.np_random.random() < 0.5
elif self.use_hindsight_instructions:
# generate hindsight instructions with a 25% chance
self.ep_hindsight_instruction = self.np_random.random() < 0.25
elif self.use_action_repair:
# generate action repair commands with a 50% chance
self.ep_action_repair = self.np_random.random() < 0.50
if self.ep_hindsight_instruction:
self.total_hindsight_instruction_episodes += 1
if self.ep_action_repair:
self.total_action_repair_episodes += 1
def get_task_metrics(self) -> Dict:
""" Returns a dict of task-specific metrics """
metrics = {}
if self.use_action_repair:
metrics["AR_episodes"] = self.total_action_repair_episodes
if self.total_action_repair_episodes:
metrics["AR_success_rate"] = round(self.action_repair_success_ctr / self.total_action_repair_episodes,
2)
if self.use_hindsight_instructions:
metrics["HI_episodes"] = self.total_hindsight_instruction_episodes
if self.total_hindsight_instruction_episodes:
metrics["HI_discovery_rate"] = round(
self.discovered_hindsight_instruction_ctr / self.total_hindsight_instruction_episodes, 2)
return metrics
| 16,748 | 48.553254 | 153 |
py
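The task logic above tracks per-episode action-repair and hindsight-instruction counters that `get_task_metrics()` aggregates. A minimal readout sketch (hypothetical usage; it assumes the environment exposes the task as `env.task` and the step budget as `env._max_episode_steps`, the same attributes the example scripts below rely on):
import gymnasium as gym
import lanro_gym

env = gym.make("PandaNLPush2HIAR-v0")
for _ in range(10):
    env.reset()
    for _ in range(env._max_episode_steps):
        env.step(env.action_space.sample())
# e.g. {'AR_episodes': 4, 'AR_success_rate': 0.0, 'HI_episodes': 3, 'HI_discovery_rate': 0.0}
print(env.task.get_task_metrics())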
|
lanro-gym
|
lanro-gym-main/lanro_gym/environments/__init__.py
|
from .panda import PandaReachEnv, PandaPushEnv, PandaSlideEnv, PandaStackEnv
from .panda_nl import PandaNLReachEnv, PandaNLPushEnv, PandaNLGraspEnv, PandaNLLiftEnv
| 164 | 54 | 86 |
py
|
lanro-gym
|
lanro-gym-main/lanro_gym/environments/panda_nl.py
|
from lanro_gym.env import LanguageEnv
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.robots import Panda
from lanro_gym.tasks import NLReach, NLLift, NLGrasp, NLPush
class PandaNLReachEnv(LanguageEnv):
def __init__(self,
render=False,
num_obj=2,
obs_type="state",
mode='Color',
action_type='end_effector',
use_hindsight_instructions=False,
use_action_repair=False,
delay_action_repair=False,
use_negations_action_repair=False,
use_synonyms=False,
camera_mode='ego'):
sim = PyBulletSimulation(render=render)
robot = Panda(sim, fixed_gripper=True, action_type=action_type, camera_mode=camera_mode)
task = NLReach(sim,
robot,
num_obj=num_obj,
mode=mode,
use_hindsight_instructions=use_hindsight_instructions,
use_action_repair=use_action_repair,
delay_action_repair=delay_action_repair,
use_negations_action_repair=use_negations_action_repair,
use_synonyms=use_synonyms)
LanguageEnv.__init__(self, sim, robot, task, obs_type=obs_type)
class PandaNLGraspEnv(LanguageEnv):
def __init__(self,
render=False,
num_obj=2,
obs_type="state",
mode='Color',
action_type='end_effector',
use_hindsight_instructions=False,
use_action_repair=False,
delay_action_repair=False,
use_negations_action_repair=False,
use_synonyms=False,
camera_mode='ego'):
sim = PyBulletSimulation(render=render)
robot = Panda(sim, fixed_gripper=False, action_type=action_type, camera_mode=camera_mode)
task = NLGrasp(sim,
robot,
num_obj=num_obj,
mode=mode,
use_hindsight_instructions=use_hindsight_instructions,
use_action_repair=use_action_repair,
delay_action_repair=delay_action_repair,
use_negations_action_repair=use_negations_action_repair,
use_synonyms=use_synonyms)
LanguageEnv.__init__(self, sim, robot, task, obs_type=obs_type)
class PandaNLLiftEnv(LanguageEnv):
def __init__(self,
render=False,
num_obj=2,
obs_type="state",
mode='Color',
action_type='end_effector',
use_hindsight_instructions=False,
use_action_repair=False,
delay_action_repair=False,
use_negations_action_repair=False,
use_synonyms=False,
camera_mode='ego'):
sim = PyBulletSimulation(render=render)
robot = Panda(sim, fixed_gripper=False, action_type=action_type, camera_mode=camera_mode)
task = NLLift(sim,
robot,
num_obj=num_obj,
mode=mode,
use_hindsight_instructions=use_hindsight_instructions,
use_action_repair=use_action_repair,
delay_action_repair=delay_action_repair,
use_negations_action_repair=use_negations_action_repair,
use_synonyms=use_synonyms)
LanguageEnv.__init__(self, sim, robot, task, obs_type=obs_type)
class PandaNLPushEnv(LanguageEnv):
def __init__(self,
render=False,
num_obj=2,
obs_type="state",
mode='Color',
action_type='end_effector',
use_hindsight_instructions=False,
use_action_repair=False,
delay_action_repair=False,
use_negations_action_repair=False,
use_synonyms=False,
camera_mode='ego'):
sim = PyBulletSimulation(render=render)
robot = Panda(sim, fixed_gripper=True, action_type=action_type, camera_mode=camera_mode)
task = NLPush(sim,
robot,
num_obj=num_obj,
mode=mode,
use_hindsight_instructions=use_hindsight_instructions,
use_action_repair=use_action_repair,
delay_action_repair=delay_action_repair,
use_negations_action_repair=use_negations_action_repair,
use_synonyms=use_synonyms)
LanguageEnv.__init__(self, sim, robot, task, obs_type=obs_type)
| 4,872 | 40.649573 | 97 |
py
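Each class above only wires a PyBullet simulation, a Panda robot, and the matching task into `LanguageEnv`, so the environments can also be constructed directly instead of through `gym.make`. A minimal sketch (keyword values are illustrative; the reset/step signatures are assumed to follow the gymnasium API used in the example scripts):
from lanro_gym.environments import PandaNLReachEnv

env = PandaNLReachEnv(num_obj=2, mode='Color', use_synonyms=True)
obs, info = env.reset()
obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
env.close()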
|
lanro-gym
|
lanro-gym-main/lanro_gym/environments/panda.py
|
from lanro_gym.env import GoalEnv
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.robots import Panda
from lanro_gym.tasks import Reach, Push, Stack, Slide
class PandaReachEnv(GoalEnv):
def __init__(self, render=False, reward_type="sparse", action_type='end_effector'):
sim = PyBulletSimulation(render=render)
robot = Panda(sim, fixed_gripper=True, action_type=action_type)
task = Reach(
sim,
reward_type=reward_type,
get_ee_position=robot.get_ee_position,
)
GoalEnv.__init__(self, sim, robot, task)
class PandaPushEnv(GoalEnv):
def __init__(self, render=False, reward_type="sparse", action_type='end_effector'):
sim = PyBulletSimulation(render=render)
robot = Panda(sim, fixed_gripper=True, action_type=action_type)
task = Push(sim, reward_type=reward_type)
GoalEnv.__init__(self, sim, robot, task)
class PandaSlideEnv(GoalEnv):
def __init__(self, render=False, reward_type="sparse", action_type='end_effector'):
sim = PyBulletSimulation(render=render)
robot = Panda(sim, fixed_gripper=True, action_type=action_type)
task = Slide(sim, reward_type=reward_type)
GoalEnv.__init__(self, sim, robot, task)
class PandaStackEnv(GoalEnv):
def __init__(self, render=False, reward_type="sparse", num_obj=2, goal_z_range=0.0, action_type='end_effector'):
sim = PyBulletSimulation(render=render)
robot = Panda(sim, fixed_gripper=False, action_type=action_type)
task = Stack(sim, reward_type=reward_type, num_obj=num_obj, goal_z_range=goal_z_range)
GoalEnv.__init__(self, sim, robot, task)
| 1,697 | 36.733333 | 116 |
py
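The goal-conditioned variants follow the same wiring pattern with `GoalEnv`. A short sketch of the HER-style reward recomputation, assuming the dict observation keys used in `scripted_reach.py` and the sparse 0/-1 reward asserted in the tests below:
from lanro_gym.environments import PandaReachEnv

env = PandaReachEnv(reward_type="sparse")
obs, info = env.reset()
# recompute the reward for an arbitrary (achieved_goal, desired_goal) pair
reward = env.compute_reward(obs['achieved_goal'], obs['desired_goal'], None)
assert reward in (-1.0, 0.0)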
|
lanro-gym
|
lanro-gym-main/examples/scripted_grasp.py
|
"""A script to demonstrate grasping."""
import gymnasium as gym
import lanro_gym
import time
import numpy as np
env = gym.make("PandaNLLift2Shape-v0", render=True)
total_ep = 100
start_t = time.time()
for _ in range(total_ep):
obs, info = env.reset()
goal_pos = env.sim.get_base_position(env.task.goal_object_body_key)
for i in range(env._max_episode_steps * 2):
ee_pos = obs['observation'][:3]
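        # staged open-loop grasp: approach (i < 35), close the gripper (35 <= i < 45), then lift (i >= 45)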
if i < 35:
action = np.concatenate((goal_pos - ee_pos, [0]))
action[3] = 1
elif i < 45:
action = np.concatenate((goal_pos - ee_pos, [0]))
action[3] = -1
elif i < 60:
action = np.zeros((4, ))
action[2] = 0.05
action[3] = -1
        obs = env.step(action)[0]  # refresh obs so ee_pos tracks the current end-effector position
| 783 | 26.034483 | 71 |
py
|
lanro-gym
|
lanro-gym-main/examples/fps.py
|
"""A script to measure the FPS"""
import gymnasium as gym
import lanro_gym
import time
env = gym.make("PandaNLPush2HIAR-v0")
# env = gym.make("PandaNLPush2PixelEgoHIAR-v0")
total_ep = 100
step_ctr = 0
start_t = time.time()
for _ in range(total_ep):
env.reset()
for _ in range(env._max_episode_steps):
_ = env.step(env.action_space.sample())
step_ctr += 1
print(f"FPS: {int(step_ctr / (time.time() - start_t))}")
| 446 | 23.833333 | 56 |
py
|
lanro-gym
|
lanro-gym-main/examples/scripted_reach.py
|
"""A script to demonstrate reaching."""
import gymnasium as gym
import lanro_gym
import time as time
import numpy as np
env = gym.make("PandaReach-v0", render=True)
total_ep = 100
start_t = time.time()
for _ in range(total_ep):
obs, info = env.reset()
goal_pos = obs['desired_goal']
for i in range(env._max_episode_steps * 4):
ee_pos = obs['achieved_goal']
action = (goal_pos - ee_pos).copy()
        if i < 25:
            action *= 0.75
        elif i < 75:
            action *= 0.5
else:
action = np.zeros_like(action)
        obs = env.step(action)[0]  # refresh obs so ee_pos tracks the current end-effector position
| 646 | 22.962963 | 47 |
py
|
lanro-gym
|
lanro-gym-main/test/nl_utils_test.py
|
import numpy as np
from lanro_gym.language_utils import parse_instructions, create_commands, Vocabulary, word_in_string
from lanro_gym.env_utils import SHAPES, RGBCOLORS
def test_parse_instruction():
instruction_list = [
"Hello world",
"World helo",
"Sunny weather",
"Lorem ipsum dolor sit amet",
]
word_list, max_instr_len = parse_instructions(instruction_list)
assert len(word_list) == 10
assert max_instr_len == 5
def test_command_creation():
commands = create_commands("instruction", (RGBCOLORS.RED, SHAPES.CUBE), action_verbs=["pick", "tick"])
assert len(commands) == 2
def test_command_creation_synonyms():
commands = create_commands("instruction", (RGBCOLORS.RED, SHAPES.CUBE),
action_verbs=["pick", "tick"],
use_synonyms=True)
assert len(commands) == 12
def test_command_creation_synonyms_only():
commands = create_commands("instruction", (RGBCOLORS.RED, SHAPES.CUBE),
action_verbs=["pick", "tick"],
use_base=False,
use_synonyms=True)
assert len(commands) == 4
def test_command_creation_list():
commands = np.concatenate(
[create_commands("instruction", (color, SHAPES.CUBE), action_verbs=["pick", "tick"]) for color in RGBCOLORS])
assert len(commands) == 24
def test_command_creation_list_synonyms():
commands = np.concatenate([
create_commands("instruction", (color, SHAPES.CUBE), action_verbs=["pick", "tick"], use_synonyms=True)
for color in RGBCOLORS
])
assert len(commands) == 144
def test_command_creation_list_synonyms_only():
commands = np.concatenate([
create_commands("instruction", (color, SHAPES.CUBE),
action_verbs=["pick", "tick"],
use_base=False,
use_synonyms=True) for color in RGBCOLORS
])
assert len(commands) == 48
def test_action_repair_command_creation():
commands = create_commands("repair", (RGBCOLORS.RED, SHAPES.CUBE))
assert len(commands) == 6
def test_action_repair_command_creation_synonyms():
commands = create_commands("repair", (RGBCOLORS.RED, SHAPES.CUBE), use_synonyms=True)
assert len(commands) == 36
def test_action_repair_command_creation_synonyms_only():
commands = create_commands("repair", (RGBCOLORS.RED, SHAPES.CUBE), use_base=False, use_synonyms=True)
assert len(commands) == 12
def test_negation_command_creation():
commands = create_commands("negation", (RGBCOLORS.RED, SHAPES.CUBE))
assert len(commands) == 1
def test_negation_command_creation_synonyms():
commands = create_commands("negation", (RGBCOLORS.RED, SHAPES.CUBE), use_synonyms=True)
assert len(commands) == 6
def test_negation_command_creation_synonyms_only():
commands = create_commands("negation", (RGBCOLORS.RED, SHAPES.CUBE), use_base=False, use_synonyms=True)
assert len(commands) == 2
def test_word_in_string():
instruction_string = "Lorem ipsum dolor sit amet"
search_words = np.array(["ipsum"])
assert "ipsum" == word_in_string(instruction_string, search_words)
assert "" == word_in_string(instruction_string, np.array(["dog", "cat"]))
def test_vocabulary():
vocab = Vocabulary(["hello", "world", "sunny", "weather"])
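    # index 0 is reserved for <pad>, so four words give a vocabulary of length 5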
assert vocab("hello") == 1
assert vocab.idx_to_word(0) == "<pad>"
assert vocab.word_to_idx("hello") == 1
assert vocab.word_to_idx("sunny") == 2
assert len(vocab) == 5
| 3,593 | 32.90566 | 117 |
py
|
lanro-gym
|
lanro-gym-main/test/env_test.py
|
import gymnasium as gym
import numpy as np
import lanro_gym
def run_random_policy(env):
env.reset()
for _ in range(env._max_episode_steps):
obs = env.step(env.action_space.sample())[0]
        assert np.all(obs['observation'] <= env.observation_space['observation'].high)
        assert np.all(obs['observation'] >= env.observation_space['observation'].low)
env.close()
def check_calc_reward(env):
"""Test `compute_reward()` for HER compatibility.""" ""
obs, _ = env.reset()
ag = obs['achieved_goal']
g = obs['desired_goal']
single_reward = env.compute_reward(ag, g, None)
assert single_reward in [-1.0, 0.0]
batch_size = 128
ag_batch = np.stack([ag for _ in range(batch_size)])
g_batch = np.stack([g for _ in range(batch_size)])
batch_reward = env.compute_reward(ag_batch, g_batch, None)
assert batch_reward.shape[0] == batch_size
def test_env_open_close():
env = gym.make("PandaReach-v0")
env.reset()
env.close()
def test_goal_conditioned_envs():
render_mode = False
action_types = [
'absolute_quat', 'relative_quat', 'relative_joints', 'absolute_joints', 'absolute_rpy', 'relative_rpy',
'end_effector'
]
for a_type in action_types:
for robot in ['Panda']:
for task in ['Reach', 'Push', 'Slide', 'PickAndPlace']:
run_random_policy(gym.make(f'{robot}{task}-v0', render=render_mode, action_type=a_type))
check_calc_reward(gym.make(f'{robot}{task}-v0'))
for task in ['Stack2', 'Stack3', 'Stack4']:
run_random_policy(gym.make(f'{robot}{task}-v0', render=render_mode, action_type=a_type))
check_calc_reward(gym.make(f'{robot}{task}-v0'))
def test_language_conditioned_envs():
render_mode = False
obj_count = 2
action_types = [
'absolute_quat', 'relative_quat', 'relative_joints', 'absolute_joints', 'absolute_rpy', 'relative_rpy',
'end_effector'
]
for a_type in action_types:
for robot in ['Panda']:
for lang_task in ['NLReach', 'NLPush', 'NLGrasp', 'NLLift']:
for _mode in ['', 'Color', 'Shape', 'Weight', 'Size', 'ColorShape', 'WeightShape', 'SizeShape']:
for _obstype in ["", "PixelEgo", "PixelStatic"]:
for _use_syn in ["", "Synonyms"]:
for _hindsight_instr in ["", "HI"]:
for _action_repair in ["", "AR", "ARN", "ARD", "ARND"]:
id = f'{robot}{lang_task}{obj_count}{_mode}{_obstype}{_use_syn}{_hindsight_instr}{_action_repair}-v0'
run_random_policy(gym.make(id, render=render_mode, action_type=a_type))
| 2,782 | 39.926471 | 137 |
py
|