FEAT: Models
- model/c3c3bdb7ad80396e69de171995e2038f900940c8/config.json +26 -0
- model/c3c3bdb7ad80396e69de171995e2038f900940c8/pytorch_model.bin +3 -0
- model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer/special_tokens_map.json +7 -0
- model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer/tokenizer.json +0 -0
- model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer/tokenizer_config.json +13 -0
- model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer/vocab.txt +0 -0
- model/c3c3bdb7ad80396e69de171995e2038f900940c8/training_args.bin +3 -0
- model/e09d71f55f4b6fc20135f856bf029322a3265d8d/config.json +26 -0
- model/e09d71f55f4b6fc20135f856bf029322a3265d8d/optimizer.pt +3 -0
- model/e09d71f55f4b6fc20135f856bf029322a3265d8d/pytorch_model.bin +3 -0
- model/e09d71f55f4b6fc20135f856bf029322a3265d8d/tokenizer/special_tokens_map.json +7 -0
- model/e09d71f55f4b6fc20135f856bf029322a3265d8d/tokenizer/tokenizer.json +0 -0
- model/e09d71f55f4b6fc20135f856bf029322a3265d8d/tokenizer/tokenizer_config.json +13 -0
- model/e09d71f55f4b6fc20135f856bf029322a3265d8d/tokenizer/vocab.txt +0 -0
model/c3c3bdb7ad80396e69de171995e2038f900940c8/config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "bert-base-uncased",
+  "architectures": [
+    "BertForNextSentencePrediction"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.17.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
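The config above records a BertForNextSentencePrediction model initialized from bert-base-uncased and saved with transformers 4.17.0. A minimal loading sketch, assuming the repository is checked out locally with the directory layout this commit adds (the tokenizer files sit in a tokenizer/ subfolder, so they are loaded from there rather than from the model root):

from transformers import BertForNextSentencePrediction, BertTokenizer

model_dir = "model/c3c3bdb7ad80396e69de171995e2038f900940c8"
model = BertForNextSentencePrediction.from_pretrained(model_dir)  # reads config.json and pytorch_model.bin
tokenizer = BertTokenizer.from_pretrained(f"{model_dir}/tokenizer")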
model/c3c3bdb7ad80396e69de171995e2038f900940c8/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aec095ee8fc2c88ca0460f59e19df6b38c5c91d38a3ab04928ce7eb996c0d62a
+size 438022005
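pytorch_model.bin is committed as a Git LFS pointer: the three lines above are the entire file in Git, while the actual ~438 MB weight payload is fetched by git lfs on clone or checkout, and the oid is the SHA-256 of that payload. A quick integrity check of the downloaded file, using only the standard library:

import hashlib

expected = "aec095ee8fc2c88ca0460f59e19df6b38c5c91d38a3ab04928ce7eb996c0d62a"
h = hashlib.sha256()
with open("model/c3c3bdb7ad80396e69de171995e2038f900940c8/pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected, "LFS payload does not match the pointer oid"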
model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer/tokenizer.json
ADDED
The diff for this file is too large to render.
model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": null,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
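This config pins BertTokenizer with lowercasing and a 512-token limit, matching max_position_embeddings in config.json. For next-sentence prediction the tokenizer is normally called on a sentence pair, which produces the token_type_ids that the model's type_vocab_size of 2 expects; a small sketch with illustrative sentences:

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(
    "model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer"
)
enc = tokenizer("How old are you?", "I am 24 years old.")
# enc["token_type_ids"] is 0 for sentence A tokens and 1 for sentence B tokens.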
model/c3c3bdb7ad80396e69de171995e2038f900940c8/tokenizer/vocab.txt
ADDED
The diff for this file is too large to render.
model/c3c3bdb7ad80396e69de171995e2038f900940c8/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9965b76852aa7c689ed048738e3db2f0b386154cddbfb42d9da7a064a9f9231
+size 3195
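training_args.bin is the TrainingArguments object that transformers' Trainer serializes with torch.save alongside each checkpoint; it records the run's hyperparameters. It can be inspected directly, though recent PyTorch versions default to weights_only loading, so full unpickling must be requested explicitly (only do this for files you trust):

import torch

args = torch.load(
    "model/c3c3bdb7ad80396e69de171995e2038f900940c8/training_args.bin",
    weights_only=False,  # unpickles the full TrainingArguments object
)
print(args)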
model/e09d71f55f4b6fc20135f856bf029322a3265d8d/config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "bert-base-uncased",
+  "architectures": [
+    "BertForNextSentencePrediction"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.17.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
model/e09d71f55f4b6fc20135f856bf029322a3265d8d/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab19babe7ae39d1b6746d1dec58ab7758b0bacc33346b66e9c7da94419bebf96
+size 124944384
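Unlike the first checkpoint, this one also ships optimizer.pt (an LFS pointer to ~125 MB of optimizer state), which is what allows training to resume exactly where it stopped. A sketch of restoring it, assuming AdamW was the optimizer used by the training script (the optimizer class and learning rate here are placeholders, not confirmed by the commit):

import torch
from transformers import BertForNextSentencePrediction

model_dir = "model/e09d71f55f4b6fc20135f856bf029322a3265d8d"
model = BertForNextSentencePrediction.from_pretrained(model_dir)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)  # placeholder hyperparameters
optimizer.load_state_dict(torch.load(f"{model_dir}/optimizer.pt", map_location="cpu"))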
model/e09d71f55f4b6fc20135f856bf029322a3265d8d/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f791a2952d2707e82b275b14738b0fcd52c56b9a6acd597f4480829737d4368
+size 438022005
model/e09d71f55f4b6fc20135f856bf029322a3265d8d/tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
model/e09d71f55f4b6fc20135f856bf029322a3265d8d/tokenizer/tokenizer.json
ADDED
The diff for this file is too large to render.
model/e09d71f55f4b6fc20135f856bf029322a3265d8d/tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": null,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
model/e09d71f55f4b6fc20135f856bf029322a3265d8d/tokenizer/vocab.txt
ADDED
The diff for this file is too large to render.