#!/bin/bash

#SBATCH --ntasks=1
#SBATCH --cpus-per-task=40
#SBATCH --job-name=process
#SBATCH --nodelist=ilps-cn002
#SBATCH --time=2-00:00:00
#SBATCH --mem=256G

pwd
conda info --envs
source /home/stan1/anaconda3/bin/activate fairseq
cd /ivi/ilps/projects/ltl-mt/EC40-dataset

mkdir -p dataset/spm_sharded


######################## ------------ IMPORTANT ------------ ########################

######## This is an example of how to build a sharded dataset (5 shards).
######## Before running the code below, you should already have trained your sentencepiece/subword-nmt tokenizer.
######## Then encode the dataset with spm, and use the code below to split it into 5 shards.

#### For the eval set, the easiest way is to add the whole eval set to all 5 shard fairseq data folders
#### (a hedged sketch is given after the split loops below).
### note: ha and kab are two exceptions (because of their data size): you will find them in the *SPECIAL* sections below.

######################## ------------ IMPORTANT ------------ ########################
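
######## For reference, spm-encoding one language pair with the trained model might look roughly like the
######## sketch below. The raw-data paths and the spm_64k.model file name are assumptions, not part of
######## this script; adjust them to your own setup before running anything:
########
########   for LANG in de nl fr; do
########       spm_encode --model=dataset/spm/spm_64k.model --output_format=piece \
########           < dataset/raw/train.en-$LANG.en    > dataset/spm/train.en-$LANG.en
########       spm_encode --model=dataset/spm/spm_64k.model --output_format=piece \
########           < dataset/raw/train.en-$LANG.$LANG > dataset/spm/train.en-$LANG.$LANG
########   done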

SHARD_SUB_DIR=('0' '1' '2' '3' '4')
for i in "${!SHARD_SUB_DIR[@]}"; do
    SUB_NUMBER=${SHARD_SUB_DIR[i]}
    mkdir dataset/spm_sharded/shard${SUB_NUMBER}
done

HIGH=('de' 'nl' 'fr' 'es' 'ru' 'cs' 'hi' 'bn' 'ar' 'he')
MED=('sv' 'da' 'it' 'pt' 'pl' 'bg' 'kn' 'mr' 'mt') #ha
LOW=('af' 'lb' 'ro' 'oc' 'uk' 'sr' 'sd' 'gu' 'ti' 'am')
ELOW=('no' 'is' 'ast' 'ca' 'be' 'bs' 'ne' 'ur' 'so') #kab

SPM_DIR=dataset/spm
SPM_SHARD_DIR=dataset/spm_sharded

##

## HIGH: 5M lines per file -> split into 1M lines per shard
## (split -d -a 2 produces numeric suffixes 00..04, hence the shard0${SUB_NUMBER} names in the mv below)
for i in "${!HIGH[@]}"; do
    LANG=${HIGH[i]}
    split -l 1000000 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
    split -l 1000000 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard

    for j in "${!SHARD_SUB_DIR[@]}"; do
        SUB_NUMBER=${SHARD_SUB_DIR[j]}
        mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
        mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
    done
done

# MED: 1M lines per file -> split into 200K lines per shard
for i in "${!MED[@]}"; do
    LANG=${MED[i]}
    split -l 200000 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
    split -l 200000 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard

    for j in "${!SHARD_SUB_DIR[@]}"; do
        SUB_NUMBER=${SHARD_SUB_DIR[j]}
        mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
        mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
    done
done

# LOW: 100K lines per file -> split into 20K lines per shard
for i in "${!LOW[@]}"; do
    LANG=${LOW[i]}
    split -l 20000 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
    split -l 20000 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard

    for j in "${!SHARD_SUB_DIR[@]}"; do
        SUB_NUMBER=${SHARD_SUB_DIR[j]}
        mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
        mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
    done
done

## ELOW: 50K lines per file -> split into 10K lines per shard
for i in "${!ELOW[@]}"; do
    LANG=${ELOW[i]}
    split -l 10000 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
    split -l 10000 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard

    for j in "${!SHARD_SUB_DIR[@]}"; do
        SUB_NUMBER=${SHARD_SUB_DIR[j]}
        mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
        mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
    done
done

# SPECIAL ha: 344,000 lines total -> split into 68,800 lines per shard (344,000 / 5)
HA=('ha')
for i in "${!HA[@]}"; do
    LANG=${HA[i]}
    split -l 68800 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
    split -l 68800 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard

    for j in "${!SHARD_SUB_DIR[@]}"; do
        SUB_NUMBER=${SHARD_SUB_DIR[j]}
        mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
        mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
    done
done

# SPECIAL kab: 18,448 lines total -> split into 3,690 lines per shard (18,448 / 5, rounded up)
KAB=('kab')
for i in "${!KAB[@]}"; do
    LANG=${KAB[i]}
    split -l 3690 $SPM_DIR/train.en-$LANG.en -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.en.shard
    split -l 3690 $SPM_DIR/train.en-$LANG.$LANG -d -a 2 $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard

    for j in "${!SHARD_SUB_DIR[@]}"; do
        SUB_NUMBER=${SHARD_SUB_DIR[j]}
        mv $SPM_SHARD_DIR/train.en-$LANG.en.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.en
        mv $SPM_SHARD_DIR/train.en-$LANG.$LANG.shard0${SUB_NUMBER} dataset/spm_sharded/shard${SUB_NUMBER}/train.en-$LANG.$LANG
    done
done
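
## Eval set: per the note at the top, the full eval set goes into every shard. A minimal sketch, assuming
## the spm-encoded eval files live in $SPM_DIR and are named valid.en-xx.* (adjust to your own naming):
##
##   for SUB_NUMBER in "${SHARD_SUB_DIR[@]}"; do
##       cp $SPM_DIR/valid.* dataset/spm_sharded/shard${SUB_NUMBER}/
##   done
##
## The copied valid.* files can then be binarized alongside the training data, e.g. by also passing
## --validpref to the fairseq-preprocess call below.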

# ------------------------ 4. Fairseq preparation (sharded) ------------------------ #
SPM_DATA_DIR=dataset/spm_sharded
FAIRSEQ_DIR=dataset/fairseq-data-bin-sharded
mkdir ${FAIRSEQ_DIR}

# Build the fairseq dictionary from the sentencepiece vocab: drop the first three special tokens
# (<unk>, <s>, </s>) and append a dummy frequency of 100 to every remaining entry
cut -f1 dataset/spm/spm_64k.vocab | tail -n +4 | sed "s/$/ 100/g" > ${FAIRSEQ_DIR}/dict.txt

SHARD_SUB_DIR=('0' '1' '2' '3' '4')
for i in "${!SHARD_SUB_DIR[@]}"; do
    SUB_NUMBER=${SHARD_SUB_DIR[i]}
    mkdir $FAIRSEQ_DIR/shard${SUB_NUMBER}
done

# Binarize each shard with fairseq-preprocess, using the mmap dataset implementation
for SHARD in $(seq 0 4); do
    SRC=en
    for TGT in bg so ca da be bs mt es uk am hi ro no ti de cs lb pt nl mr is ne ur oc ast ha sv kab gu ar fr ru it pl sr sd he af kn bn; do
        fairseq-preprocess \
            --dataset-impl mmap \
            --source-lang ${SRC} \
            --target-lang ${TGT} \
            --trainpref ${SPM_DATA_DIR}/shard${SHARD}/train.${SRC}-${TGT} \
            --destdir ${FAIRSEQ_DIR}/shard${SHARD} \
            --thresholdtgt 0 \
            --thresholdsrc 0 \
            --workers 40 \
            --srcdict ${FAIRSEQ_DIR}/dict.txt \
            --tgtdict ${FAIRSEQ_DIR}/dict.txt
    done
    cp ${FAIRSEQ_DIR}/dict.txt ${FAIRSEQ_DIR}/shard${SHARD}/dict.txt
done
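
# Once every shard is binarized, fairseq can cycle through the shards epoch by epoch when the shard
# directories are passed as a single colon-separated data path. A hedged, illustrative sketch only
# (the arch and hyperparameters are placeholders; a real multilingual run needs the full set of
# flags such as --lang-pairs and language-token options):
#
#   DATA=${FAIRSEQ_DIR}/shard0:${FAIRSEQ_DIR}/shard1:${FAIRSEQ_DIR}/shard2:${FAIRSEQ_DIR}/shard3:${FAIRSEQ_DIR}/shard4
#   fairseq-train $DATA \
#       --task translation_multi_simple_epoch \
#       --arch transformer \
#       --max-tokens 4096 \
#       --max-epoch 10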