Training in progress, step 500
Files changed:
- added_tokens.json +3 -0
- model.safetensors +1 -1
- runs/Mar25_15-02-45_5412af28fa8e/events.out.tfevents.1711378966.5412af28fa8e.35.3 +3 -0
- runs/Mar25_15-04-04_5412af28fa8e/events.out.tfevents.1711379044.5412af28fa8e.35.4 +3 -0
- special_tokens_map.json +14 -0
- tokenizer.json +0 -0
- tokenizer_config.json +66 -0
- training_args.bin +1 -1
- vocab.txt +0 -0
 
    	
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "[EOS]": 21128
+}
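
This file records tokens appended beyond the base vocabulary. A minimal sketch of how such a mapping is produced, assuming the tokenizer was extended through the transformers API; the base checkpoint name bert-base-chinese is an assumption, inferred from the 21128-entry base vocabulary and the BertTokenizer class declared in tokenizer_config.json below:

    from transformers import AutoTokenizer

    # Assumed base checkpoint: bert-base-chinese uses ids 0..21127,
    # so the first token added on top receives id 21128, as in this diff.
    tok = AutoTokenizer.from_pretrained("bert-base-chinese")
    tok.add_special_tokens({"eos_token": "[EOS]"})
    tok.save_pretrained("checkpoint-500")  # hypothetical output dir; writes added_tokens.json
    print(tok.eos_token_id)  # 21128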
    	
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:97ba9be312e7adf1c6dce185641ca4c4ef7178c6a6785fcdedf275bac93d6933
 size 408289920
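
The repository stores only a Git LFS pointer for model.safetensors (version, oid, size); the 408,289,920-byte weight blob itself lives in LFS storage, a size consistent with roughly 102M float32 parameters (408289920 / 4 bytes). A throwaway sketch of reading such a pointer file, for illustration only:

    # Minimal sketch: parse the three-line Git LFS pointer format shown above.
    def parse_lfs_pointer(path: str) -> dict:
        fields = {}
        with open(path, encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields  # e.g. {'version': ..., 'oid': 'sha256:97ba9b...', 'size': '408289920'}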
    	
runs/Mar25_15-02-45_5412af28fa8e/events.out.tfevents.1711378966.5412af28fa8e.35.3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8ea95a77496a5a89e62142567c12530fa1233ba66df8f2bc4604c4089bbfffe
+size 5262
    	
runs/Mar25_15-04-04_5412af28fa8e/events.out.tfevents.1711379044.5412af28fa8e.35.4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c33f85ca36fc4b3ad2ef28b4ec43a5e9b650a1aded08125d394b530f87c850e1
+size 5473
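
Both event files are TensorBoard logs from the Trainer's TensorBoard callback, for two runs started about a minute apart. After fetching the real blobs with git lfs pull, the logged scalars can be read back; a sketch assuming the tensorboard package is installed:

    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    # Point the accumulator at a run directory (or a single event file) and load it.
    acc = EventAccumulator("runs/Mar25_15-02-45_5412af28fa8e")
    acc.Reload()
    for tag in acc.Tags()["scalars"]:
        print(tag, [(e.step, e.value) for e in acc.Scalars(tag)])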
    	
special_tokens_map.json ADDED
@@ -0,0 +1,14 @@
+{
+  "cls_token": "[CLS]",
+  "eos_token": {
+    "content": "[EOS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
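
The eos_token entry is serialized in long form (an AddedToken with explicit lstrip/rstrip/normalized/single_word flags) because it was added at runtime, while the stock BERT tokens remain plain strings. The long form corresponds to something like the following call, a sketch rather than the author's exact code:

    from transformers import AutoTokenizer
    from tokenizers import AddedToken

    tok = AutoTokenizer.from_pretrained("bert-base-chinese")  # assumed base model
    tok.add_special_tokens({
        "eos_token": AddedToken("[EOS]", lstrip=False, rstrip=False,
                                normalized=False, single_word=False)
    })
    # tok.save_pretrained(...) then writes the dict form seen above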
    	
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff.
    	
tokenizer_config.json ADDED
@@ -0,0 +1,66 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "21128": {
+      "content": "[EOS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "eos_token": "[EOS]",
+  "mask_token": "[MASK]",
+  "model_max_length": 1024,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
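
added_tokens_decoder maps token ids back to their definitions: ids 0/100/101/102/103 are the standard BERT slots for [PAD]/[UNK]/[CLS]/[SEP]/[MASK], and 21128 is the appended [EOS]; the remaining keys (do_lower_case false, tokenize_chinese_chars true, model_max_length 1024, tokenizer_class BertTokenizer) configure the WordPiece tokenizer. A quick round-trip check against a local copy of this checkpoint (directory name hypothetical):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("checkpoint-500")  # hypothetical local path
    for tid in (0, 100, 101, 102, 103, 21128):
        print(tid, tok.convert_ids_to_tokens(tid))
    # 0 [PAD], 100 [UNK], 101 [CLS], 102 [SEP], 103 [MASK], 21128 [EOS]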
    	
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:81cb5f5bc33d08f04475075826792d5daa318eff66e5f7f1ac247f1514531eaf
 size 4920
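
training_args.bin is the pickled TrainingArguments object that the Trainer saves next to every checkpoint; only its LFS oid changed in this commit. After git lfs pull it can be inspected as below; recent PyTorch versions need weights_only=False because this is a pickled object rather than a tensor file, and unpickling executes code, so only do this for repositories you trust:

    import torch
    import transformers  # noqa: F401  (TrainingArguments must be importable to unpickle)

    args = torch.load("training_args.bin", weights_only=False)
    print(type(args).__name__)  # TrainingArguments
    print(args.save_steps)      # plausibly 500, matching the "step 500" commit message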
    	
vocab.txt ADDED
The diff for this file is too large to render. See raw diff.