"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.

This script downloads the Tatoeba corpus and extracts the sentences & translations for the languages you like.
"""
import os
import tarfile
import gzip

import sentence_transformers

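# Tatoeba identifies languages by their 3-letter ISO 639-3 codes (e.g. 'eng', 'deu').
# Every sentence in a source language gets paired with its translations in the target languages.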
source_languages = set(['eng'])
target_languages = set(['deu', 'ara', 'tur', 'spa', 'ita', 'fra'])

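# Number of source sentences (per language pair) held out for the development set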
num_dev_sentences = 1000

tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"

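# Locations of the compressed Tatoeba exports and of the CSV files extracted from them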
sentences_file_bz2 = os.path.join(tatoeba_folder, 'sentences.tar.bz2')
sentences_file = os.path.join(tatoeba_folder, 'sentences.csv')
links_file_bz2 = os.path.join(tatoeba_folder, 'links.tar.bz2')
links_file = os.path.join(tatoeba_folder, 'links.csv')

download_url = "https://downloads.tatoeba.org/exports/"

os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)

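# Download the Tatoeba exports (sentences + links) if they are not yet present locally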
for filepath in [sentences_file_bz2, links_file_bz2]:
    if not os.path.exists(filepath):
        url = download_url + os.path.basename(filepath)
        print("Download", url)
        sentence_transformers.util.http_get(url, filepath)

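# Extract the CSV files from the downloaded .tar.bz2 archives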
if not os.path.exists(sentences_file):
    print("Extract", sentences_file_bz2)
    tar = tarfile.open(sentences_file_bz2, "r:bz2")
    tar.extract('sentences.csv', path=tatoeba_folder)
    tar.close()

if not os.path.exists(links_file):
    print("Extract", links_file_bz2)
    tar = tarfile.open(links_file_bz2, "r:bz2")
    tar.extract('links.csv', path=tatoeba_folder)
    tar.close()

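# Each line of sentences.csv holds a tab-separated sentence id, language code, and sentence text.
# Keep only sentences in one of the requested languages, indexed by their id.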
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding='utf8') as fIn:
    for line in fIn:
        sent_id, lang, sentence = line.strip().split('\t')
        if lang in all_langs:
            sentences[sent_id] = (lang, sentence)

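# Each line of links.csv holds two sentence ids that are translations of each other.
# For every (source language, target language) pair, map each source sentence
# to the list of its translations.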
print("Read links.csv") |
|
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages} |
|
with open(links_file, encoding='utf8') as fIn: |
|
for line in fIn: |
|
src_id, target_id = line.strip().split() |
|
|
|
if src_id in sentences and target_id in sentences: |
|
src_lang, src_sent = sentences[src_id] |
|
trg_lang, trg_sent = sentences[target_id] |
|
|
|
if src_lang in source_languages and trg_lang in target_languages: |
|
if src_sent not in translations[src_lang][trg_lang]: |
|
translations[src_lang][trg_lang][src_sent] = [] |
|
translations[src_lang][trg_lang][src_sent].append(trg_sent) |
|
|
|
|
|
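# Write one gzipped TSV file per language pair: the first num_dev_sentences source
# sentences form the dev set, the rest the train set. Each output line contains a
# source sentence followed by all of its translations, tab-separated.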
print("Write output files") |
|
for src_lang in source_languages: |
|
for trg_lang in target_languages: |
|
source_sentences = list(translations[src_lang][trg_lang]) |
|
train_sentences = source_sentences[num_dev_sentences:] |
|
dev_sentences = source_sentences[0:num_dev_sentences] |
|
|
|
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences))) |
|
if len(dev_sentences) > 0: |
|
with gzip.open(os.path.join(output_folder, 'Tatoeba-{}-{}-dev.tsv.gz'.format(src_lang, trg_lang)), 'wt', encoding='utf8') as fOut: |
|
for sent in dev_sentences: |
|
fOut.write("\t".join([sent]+translations[src_lang][trg_lang][sent])) |
|
fOut.write("\n") |
|
|
|
if len(train_sentences) > 0: |
|
with gzip.open(os.path.join(output_folder, 'Tatoeba-{}-{}-train.tsv.gz'.format(src_lang, trg_lang)), 'wt', encoding='utf8') as fOut: |
|
for sent in train_sentences: |
|
fOut.write("\t".join([sent]+translations[src_lang][trg_lang][sent])) |
|
fOut.write("\n") |
|
|
|
|
|
print("---DONE---") |
|
|