import os, json, gzip
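
# Shard a plain-text corpus into gzip-compressed JSON-lines files: one JSON object
# per line with an integer "id" and the stripped "text" of the original line.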

def zip_file(lines, dest_file):
    """Write an iterable of already-formatted lines to a gzip-compressed text file."""
    print(f"Zipping {dest_file} ... ")
    with gzip.open(dest_file, "wt", encoding="utf8") as w:
        w.writelines(lines)
    print("    .... done.")

def split_train(mb_per_file=100):
    """Split train.txt into gzip'd JSON-lines shards of roughly mb_per_file MB each."""
    print(f"Splitting TRAIN into shards with {mb_per_file} MB per file:")

    os.makedirs("data", exist_ok=True)

    print("Working ...")
    cnt = 0           # characters accumulated in the current shard
    lid = 0           # running line id, unique across all shards
    shard = 0         # index of the shard currently being filled
    cutoff = mb_per_file * 1000 * 1000   # shard size limit, in characters
    accumulator = []  # JSON lines buffered for the current shard
    line_cnt = 0      # total lines read, for progress reporting
    with open("train.txt", "r", encoding="utf8") as f:
        for line in f:
            line_cnt += 1
            if line_cnt % 1000000 == 0:
               print(f"\t{line_cnt/1000000:.2f}M lines, at shard {shard} ... ")
            cnt += len(line)

            data = {
                "id": lid,
                "text": line.strip()
            }
            accumulator.append(json.dumps(data, ensure_ascii=False)+"\n")
            lid += 1

            # current shard reached the size limit: flush it and start a new one
            if cnt > cutoff:
                zip_file(accumulator, os.path.join("data", f"train-{shard:03d}.json.gz"))
                cnt = 0
                accumulator = []
                shard += 1

    # flush whatever is left in the buffer as the final (possibly smaller) shard
    if accumulator:
        zip_file(accumulator, os.path.join("data", f"train-{shard:03d}.json.gz"))
        shard += 1

    print(f"Done, wrote {shard} files.")

def write_valid():
    """Write valid.txt as a single gzip'd JSON-lines file."""
    lid = 0
    accumulator = []
    with open("valid.txt", "r", encoding="utf8") as f:
        for line in f:
            data = {
                "id": lid,
                "text": line.strip()
            }
            accumulator.append(json.dumps(data, ensure_ascii=False) + "\n")
            lid += 1

    zip_file(accumulator, os.path.join("data", "validation-000.json.gz"))

def split_into_train_and_valid(percent=0.01):
    """Clean corpus-diac.txt with ftfy and split it into train.txt and valid.txt,
    routing roughly `percent` of the lines to the validation set."""
    import random
    import ftfy   # third-party: text fixing
    import tqdm   # third-party: progress bar
    with open("train.txt", "w", encoding="utf8") as train_file, \
         open("valid.txt", "w", encoding="utf8") as valid_file, \
         open("corpus-diac.txt", "r", encoding="utf8") as f:
        cnt = 0
        for line in tqdm.tqdm(f):
            cnt += 1
            if cnt < 2000000:  # skip the first 2M lines of the corpus
                continue
            line = ftfy.fix_text(line.strip()) + "\n"
            if random.random() <= percent:
                valid_file.write(line)
            else:
                train_file.write(line)
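
# Note: the entry-point calls below assume train.txt and valid.txt already exist
# (they are produced by split_into_train_and_valid, which is commented out).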

# split_into_train_and_valid()
os.makedirs("data", exist_ok=True)
write_valid()   # write the single validation shard
split_train()   # write the training shards