modelId (string, 4-112 chars) | sha (string, 40 chars) | lastModified (string, 24 chars) | tags (sequence) | pipeline_tag (string, 29 classes) | private (bool, 1 class) | author (string, 2-38 chars, nullable) | config (null) | id (string, 4-112 chars) | downloads (float64, 0-36.8M, nullable) | likes (float64, 0-712, nullable) | library_name (string, 17 classes) | __index_level_0__ (int64, 0-38.5k) | readme (string, 0-186k chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tktung/aicovid_pretrain | 68af37da8e1f906b1849ef67896b1f2d0edd2215 | 2021-10-22T03:24:52.000Z | [
"pytorch",
"wav2vec2",
"pretraining",
"transformers"
] | null | false | tktung | null | tktung/aicovid_pretrain | 1 | null | transformers | 30,400 | Entry not found |
tli8hf/unqover-bert-large-uncased-squad | 2ab5130b5ff2f51ef858ddacb48bfc8878d7223e | 2021-05-20T07:58:54.000Z | [
"pytorch",
"jax",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | tli8hf | null | tli8hf/unqover-bert-large-uncased-squad | 1 | null | transformers | 30,401 | Entry not found |
tli8hf/unqover-distilbert-base-uncased-squad | f04b7ddf1003e763a2d250768185f5cde721a285 | 2020-10-19T23:39:01.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | tli8hf | null | tli8hf/unqover-distilbert-base-uncased-squad | 1 | null | transformers | 30,402 | Entry not found |
tlkh/program-synthesis-gpt-neo-125m | 70be5917e071dc743b4c52e09097433a0dcbfc27 | 2021-09-29T02:32:15.000Z | [
"pytorch",
"gpt_neo",
"text-generation",
"transformers"
] | text-generation | false | tlkh | null | tlkh/program-synthesis-gpt-neo-125m | 1 | null | transformers | 30,403 | Entry not found |
tmills/timex-thyme-colon | f12ea29f54c1ca3ff16a8cb64e09af69ba03c4d3 | 2022-05-02T22:33:28.000Z | [
"pytorch",
"cnlpt",
"transformers"
] | null | false | tmills | null | tmills/timex-thyme-colon | 1 | null | transformers | 30,404 | Entry not found |
tnagata/dummy-model | 8f79339218139ed0e172d2d24ac0a3e7b2fb297c | 2022-02-18T12:21:27.000Z | [
"pytorch",
"camembert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | tnagata | null | tnagata/dummy-model | 1 | null | transformers | 30,405 | Entry not found |
toast22a/race_natural_number_oqpl_mc | 6b430e7d9adc749159f4d7adf0ee2c24d47d60ad | 2021-05-23T13:11:24.000Z | [
"pytorch",
"tf",
"jax",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | toast22a | null | toast22a/race_natural_number_oqpl_mc | 1 | null | transformers | 30,406 | Entry not found |
toastynews/electra-hongkongese-large-generator | 527e5b4c45d3380bb2389c221d3f5277c9a2d350 | 2020-07-07T04:45:30.000Z | [
"pytorch",
"tf",
"electra",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | toastynews | null | toastynews/electra-hongkongese-large-generator | 1 | null | transformers | 30,407 | Entry not found |
toastynews/electra-hongkongese-small-generator | 67d0c9bfe0169b95964b8345a3fe16fc80ea4adc | 2020-07-07T04:13:10.000Z | [
"pytorch",
"tf",
"electra",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | toastynews | null | toastynews/electra-hongkongese-small-generator | 1 | null | transformers | 30,408 | Entry not found |
tolgaand/tolgaand | e224bc3475a026c149a8fc7c7422d52a6e5895de | 2021-09-19T01:31:19.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | tolgaand | null | tolgaand/tolgaand | 1 | null | transformers | 30,409 | Entry not found |
tom1804/DialoGPT-small-HP | f6bebfcd16fdad75089070795a648845f41fde82 | 2021-06-20T15:13:33.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | tom1804 | null | tom1804/DialoGPT-small-HP | 1 | null | transformers | 30,410 | Entry not found |
tom1804/HP | 716e276111628a3abcf305a66150e10422f13342 | 2021-06-20T15:40:46.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | tom1804 | null | tom1804/HP | 1 | null | transformers | 30,411 | ---
tags:
- conversational
---
# My Awesome Model |
tom1804/HP_last | 2af3ef0f1dff2aee3640178bed094c81826b7c49 | 2021-06-20T15:48:05.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | tom1804 | null | tom1804/HP_last | 1 | null | transformers | 30,412 | ---
tags:
- conversational
---
# My Awesome Model |
tonyalves/wav2vec2-300M-teste2 | 1cbd3e8daa16cb17ffc035d1f92c96e7cb9d98af | 2022-01-09T17:16:10.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"model-index"
] | automatic-speech-recognition | false | tonyalves | null | tonyalves/wav2vec2-300M-teste2 | 1 | null | transformers | 30,413 | ---
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-300M-teste2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-300M-teste2
This model was trained from scratch on the common_voice dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.11.3
- Pytorch 1.9.1+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
|
trangdieu/bert-large-retrained-2-epochs | b4cadaf94b220ae5bdaf480f72709ee51ea583d7 | 2021-07-19T15:21:06.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | trangdieu | null | trangdieu/bert-large-retrained-2-epochs | 1 | null | transformers | 30,414 | Entry not found |
trangdieu/bert-large-retrained-4-epochs | bfd9fa35309837b8cd55b6984b1bb5d4a00cbc11 | 2021-07-19T15:26:13.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | trangdieu | null | trangdieu/bert-large-retrained-4-epochs | 1 | null | transformers | 30,415 | Entry not found |
trangdieu/distilroberta-retrained-6-epochs | 82f856f1bae87af68735b3b8bdf1287c09cc6a49 | 2021-05-30T03:57:07.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | trangdieu | null | trangdieu/distilroberta-retrained-6-epochs | 1 | null | transformers | 30,416 | Entry not found |
transZ/BiBERT-ViBa | 648ab003886d8ac640cb81324eb8e6f226e707f6 | 2022-02-10T15:56:24.000Z | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | transZ | null | transZ/BiBERT-ViBa | 1 | null | transformers | 30,417 | Entry not found |
transformersbook/xlm-roberta-base-finetuned-panx-de | a14adde7f455d19a9106f07e7ab1ebf1083fabb4 | 2022-02-05T17:07:41.000Z | [
"pytorch",
"tensorboard",
"xlm-roberta",
"token-classification",
"dataset:xtreme",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] | token-classification | false | transformersbook | null | transformersbook/xlm-roberta-base-finetuned-panx-de | 1 | null | transformers | 30,418 | ---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
args: PAN-X.de
metrics:
- name: F1
type: f1
value: 0.8645910410381922
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-de
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the PAN-X dataset. The model is trained in Chapter 4: Multilingual Named Entity Recognition in the [NLP with Transformers book](https://learning.oreilly.com/library/view/natural-language-processing/9781098103231/). You can find the full code in the accompanying [Github repository](https://github.com/nlp-with-transformers/notebooks/blob/main/04_multilingual-ner.ipynb).
It achieves the following results on the evaluation set:
- Loss: 0.1388
- F1: 0.8646
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2652 | 1.0 | 525 | 0.1602 | 0.8230 |
| 0.1314 | 2.0 | 1050 | 0.1372 | 0.8527 |
| 0.0806 | 3.0 | 1575 | 0.1388 | 0.8646 |
### Framework versions
- Transformers 4.12.0.dev0
- Pytorch 1.9.1+cu102
- Datasets 1.12.1
- Tokenizers 0.10.3
|
ttumyche/bluebert | 5542d51cf239eb306590472ac1602d84b166a2d8 | 2020-09-21T04:57:19.000Z | [
"pytorch",
"transformers"
] | null | false | ttumyche | null | ttumyche/bluebert | 1 | null | transformers | 30,419 | Entry not found |
tucan9389/distilbert-base-uncased-finetuned-squad | e2736808f801c450ab89a660b4cb1a2a9f74a091 | 2021-11-17T16:27:10.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | tucan9389 | null | tucan9389/distilbert-base-uncased-finetuned-squad | 1 | null | transformers | 30,420 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1560
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.2252 | 1.0 | 5533 | 1.1671 |
| 0.9494 | 2.0 | 11066 | 1.1279 |
| 0.7696 | 3.0 | 16599 | 1.1560 |
### Framework versions
- Transformers 4.12.4
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
|
tyoyo/byt5-base-TEDxJP-1body-0context-lr-small | 67edf94c97d2874f91df28b738a371b5ae882ea6 | 2021-11-25T14:44:51.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | tyoyo | null | tyoyo/byt5-base-TEDxJP-1body-0context-lr-small | 1 | null | transformers | 30,421 | Entry not found |
tyoyo/byt5-base-TEDxJP-1in-1out | 0193bd1e911b5714c8d1c779827707ce54543642 | 2021-11-25T12:22:24.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | tyoyo | null | tyoyo/byt5-base-TEDxJP-1in-1out | 1 | null | transformers | 30,422 | Entry not found |
tyoyo/t5-base-TEDxJP-11body-0context | e0566ed9b359cedfea5df2fe42758f2ddd14d00e | 2021-12-02T17:37:36.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:te_dx_jp",
"transformers",
"generated_from_trainer",
"license:cc-by-sa-4.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | tyoyo | null | tyoyo/t5-base-TEDxJP-11body-0context | 1 | null | transformers | 30,423 | ---
license: cc-by-sa-4.0
tags:
- generated_from_trainer
datasets:
- te_dx_jp
model-index:
- name: t5-base-TEDxJP-11body-0context
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-base-TEDxJP-11body-0context
This model is a fine-tuned version of [sonoisa/t5-base-japanese](https://huggingface.co/sonoisa/t5-base-japanese) on the te_dx_jp dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8068
- Wer: 0.1976
- Mer: 0.1904
- Wil: 0.2816
- Wip: 0.7184
- Hits: 602335
- Substitutions: 75050
- Deletions: 39435
- Insertions: 27185
- Cer: 0.1625
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 64
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer | Mer | Wil | Wip | Hits | Substitutions | Deletions | Insertions | Cer |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:------:|:------:|:-------------:|:---------:|:----------:|:------:|
| 0.8909 | 1.0 | 746 | 0.7722 | 0.3120 | 0.2861 | 0.3989 | 0.6011 | 558138 | 99887 | 58795 | 64983 | 0.2652 |
| 0.6786 | 2.0 | 1492 | 0.7021 | 0.2226 | 0.2122 | 0.3069 | 0.6931 | 592242 | 78773 | 45805 | 34978 | 0.1862 |
| 0.5627 | 3.0 | 2238 | 0.6996 | 0.2104 | 0.2016 | 0.2942 | 0.7058 | 597381 | 76593 | 42846 | 31392 | 0.1752 |
| 0.489 | 4.0 | 2984 | 0.7161 | 0.2030 | 0.1952 | 0.2865 | 0.7135 | 599808 | 75155 | 41857 | 28506 | 0.1684 |
| 0.4355 | 5.0 | 3730 | 0.7389 | 0.2000 | 0.1924 | 0.2837 | 0.7163 | 601815 | 75247 | 39758 | 28335 | 0.1651 |
| 0.3836 | 6.0 | 4476 | 0.7537 | 0.1992 | 0.1918 | 0.2829 | 0.7171 | 601846 | 75046 | 39928 | 27815 | 0.1640 |
| 0.3617 | 7.0 | 5222 | 0.7743 | 0.1995 | 0.1918 | 0.2832 | 0.7168 | 602287 | 75268 | 39265 | 28445 | 0.1642 |
| 0.3258 | 8.0 | 5968 | 0.7907 | 0.1971 | 0.1899 | 0.2809 | 0.7191 | 602800 | 74887 | 39133 | 27258 | 0.1620 |
| 0.3225 | 9.0 | 6714 | 0.8035 | 0.1981 | 0.1908 | 0.2823 | 0.7177 | 602418 | 75372 | 39030 | 27625 | 0.1630 |
| 0.3162 | 10.0 | 7460 | 0.8068 | 0.1976 | 0.1904 | 0.2816 | 0.7184 | 602335 | 75050 | 39435 | 27185 | 0.1625 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu102
- Datasets 1.15.1
- Tokenizers 0.10.3
|
tyoyo/t5-base-TEDxJP-1body-0context-lr-small | e0c2db0084d7feab0a9fc0dde2993b91bfaf91b4 | 2021-11-26T01:39:24.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | tyoyo | null | tyoyo/t5-base-TEDxJP-1body-0context-lr-small | 1 | null | transformers | 30,424 | Entry not found |
tyoyo/t5-base-TEDxJP-1body-10context | f6207c193fe3f13f9810fa00fffddc7650221a50 | 2021-11-30T19:40:13.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:te_dx_jp",
"transformers",
"generated_from_trainer",
"license:cc-by-sa-4.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | tyoyo | null | tyoyo/t5-base-TEDxJP-1body-10context | 1 | null | transformers | 30,425 | ---
license: cc-by-sa-4.0
tags:
- generated_from_trainer
datasets:
- te_dx_jp
model-index:
- name: t5-base-TEDxJP-1body-10context
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-base-TEDxJP-1body-10context
This model is a fine-tuned version of [sonoisa/t5-base-japanese](https://huggingface.co/sonoisa/t5-base-japanese) on the te_dx_jp dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3833
- Wer: 0.1983
- Mer: 0.1900
- Wil: 0.2778
- Wip: 0.7222
- Hits: 56229
- Substitutions: 6686
- Deletions: 3593
- Insertions: 2909
- Cer: 0.1823
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 64
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer | Mer | Wil | Wip | Hits | Substitutions | Deletions | Insertions | Cer |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:------:|:-----:|:-------------:|:---------:|:----------:|:------:|
| 0.5641 | 1.0 | 746 | 0.4426 | 0.2336 | 0.2212 | 0.3143 | 0.6857 | 54711 | 7183 | 4614 | 3742 | 0.2238 |
| 0.4867 | 2.0 | 1492 | 0.4017 | 0.2045 | 0.1972 | 0.2863 | 0.7137 | 55378 | 6764 | 4366 | 2470 | 0.1853 |
| 0.4257 | 3.0 | 2238 | 0.3831 | 0.2008 | 0.1933 | 0.2826 | 0.7174 | 55715 | 6788 | 4005 | 2560 | 0.1784 |
| 0.4038 | 4.0 | 2984 | 0.3797 | 0.1963 | 0.1890 | 0.2776 | 0.7224 | 56028 | 6731 | 3749 | 2578 | 0.1748 |
| 0.3817 | 5.0 | 3730 | 0.3769 | 0.1944 | 0.1877 | 0.2758 | 0.7242 | 55926 | 6663 | 3919 | 2345 | 0.1730 |
| 0.3467 | 6.0 | 4476 | 0.3806 | 0.2111 | 0.2002 | 0.2876 | 0.7124 | 56082 | 6688 | 3738 | 3616 | 0.1916 |
| 0.3361 | 7.0 | 5222 | 0.3797 | 0.1977 | 0.1897 | 0.2780 | 0.7220 | 56173 | 6721 | 3614 | 2816 | 0.1785 |
| 0.3107 | 8.0 | 5968 | 0.3814 | 0.1993 | 0.1910 | 0.2792 | 0.7208 | 56167 | 6720 | 3621 | 2916 | 0.1839 |
| 0.3141 | 9.0 | 6714 | 0.3820 | 0.1991 | 0.1907 | 0.2787 | 0.7213 | 56201 | 6709 | 3598 | 2933 | 0.1859 |
| 0.3122 | 10.0 | 7460 | 0.3833 | 0.1983 | 0.1900 | 0.2778 | 0.7222 | 56229 | 6686 | 3593 | 2909 | 0.1823 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu102
- Datasets 1.15.1
- Tokenizers 0.10.3
|
tyoyo/t5-base-TEDxJP-1body-2context | 2be8122cc99903a0ac0884551509b02a7b7cc729 | 2021-12-06T08:37:39.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:te_dx_jp",
"transformers",
"generated_from_trainer",
"license:cc-by-sa-4.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | tyoyo | null | tyoyo/t5-base-TEDxJP-1body-2context | 1 | null | transformers | 30,426 | ---
license: cc-by-sa-4.0
tags:
- generated_from_trainer
datasets:
- te_dx_jp
model-index:
- name: t5-base-TEDxJP-1body-2context
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-base-TEDxJP-1body-2context
This model is a fine-tuned version of [sonoisa/t5-base-japanese](https://huggingface.co/sonoisa/t5-base-japanese) on the te_dx_jp dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4968
- Wer: 0.1969
- Mer: 0.1895
- Wil: 0.2801
- Wip: 0.7199
- Hits: 55902
- Substitutions: 6899
- Deletions: 3570
- Insertions: 2599
- Cer: 0.1727
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 64
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer | Mer | Wil | Wip | Hits | Substitutions | Deletions | Insertions | Cer |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:------:|:-----:|:-------------:|:---------:|:----------:|:------:|
| 0.7136 | 1.0 | 746 | 0.5716 | 0.2512 | 0.2345 | 0.3279 | 0.6721 | 54430 | 7249 | 4692 | 4731 | 0.2344 |
| 0.6267 | 2.0 | 1492 | 0.5152 | 0.2088 | 0.2005 | 0.2917 | 0.7083 | 55245 | 6949 | 4177 | 2732 | 0.2009 |
| 0.5416 | 3.0 | 2238 | 0.4969 | 0.2025 | 0.1948 | 0.2851 | 0.7149 | 55575 | 6871 | 3925 | 2646 | 0.1802 |
| 0.5223 | 4.0 | 2984 | 0.4915 | 0.1989 | 0.1917 | 0.2816 | 0.7184 | 55652 | 6826 | 3893 | 2481 | 0.1754 |
| 0.4985 | 5.0 | 3730 | 0.4929 | 0.1991 | 0.1916 | 0.2814 | 0.7186 | 55759 | 6828 | 3784 | 2603 | 0.1753 |
| 0.4675 | 6.0 | 4476 | 0.4910 | 0.1969 | 0.1897 | 0.2799 | 0.7201 | 55834 | 6859 | 3678 | 2534 | 0.1756 |
| 0.445 | 7.0 | 5222 | 0.4940 | 0.1955 | 0.1884 | 0.2782 | 0.7218 | 55881 | 6821 | 3669 | 2485 | 0.1712 |
| 0.4404 | 8.0 | 5968 | 0.4932 | 0.1979 | 0.1903 | 0.2801 | 0.7199 | 55881 | 6828 | 3662 | 2643 | 0.1742 |
| 0.4525 | 9.0 | 6714 | 0.4951 | 0.1968 | 0.1893 | 0.2799 | 0.7201 | 55939 | 6897 | 3535 | 2632 | 0.1740 |
| 0.4077 | 10.0 | 7460 | 0.4968 | 0.1969 | 0.1895 | 0.2801 | 0.7199 | 55902 | 6899 | 3570 | 2599 | 0.1727 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu102
- Datasets 1.15.1
- Tokenizers 0.10.3
|
tyoyo/t5-base-TEDxJP-1body-3context | bb0e219825caaf6ed9611f63e0851bc90aa1fc18 | 2021-12-03T21:07:34.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:te_dx_jp",
"transformers",
"generated_from_trainer",
"license:cc-by-sa-4.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | tyoyo | null | tyoyo/t5-base-TEDxJP-1body-3context | 1 | null | transformers | 30,427 | ---
license: cc-by-sa-4.0
tags:
- generated_from_trainer
datasets:
- te_dx_jp
model-index:
- name: t5-base-TEDxJP-1body-3context
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-base-TEDxJP-1body-3context
This model is a fine-tuned version of [sonoisa/t5-base-japanese](https://huggingface.co/sonoisa/t5-base-japanese) on the te_dx_jp dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4926
- Wer: 0.1968
- Mer: 0.1894
- Wil: 0.2793
- Wip: 0.7207
- Hits: 55899
- Substitutions: 6836
- Deletions: 3636
- Insertions: 2590
- Cer: 0.1733
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 64
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer | Mer | Wil | Wip | Hits | Substitutions | Deletions | Insertions | Cer |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:------:|:-----:|:-------------:|:---------:|:----------:|:------:|
| 0.7082 | 1.0 | 746 | 0.5637 | 0.2626 | 0.2430 | 0.3355 | 0.6645 | 54301 | 7195 | 4875 | 5358 | 0.2552 |
| 0.6213 | 2.0 | 1492 | 0.5150 | 0.2068 | 0.1994 | 0.2899 | 0.7101 | 55107 | 6861 | 4403 | 2462 | 0.1866 |
| 0.5331 | 3.0 | 2238 | 0.4945 | 0.2038 | 0.1958 | 0.2858 | 0.7142 | 55551 | 6845 | 3975 | 2705 | 0.1816 |
| 0.5185 | 4.0 | 2984 | 0.4880 | 0.2003 | 0.1929 | 0.2831 | 0.7169 | 55639 | 6860 | 3872 | 2563 | 0.1779 |
| 0.4963 | 5.0 | 3730 | 0.4858 | 0.1988 | 0.1912 | 0.2810 | 0.7190 | 55837 | 6838 | 3696 | 2662 | 0.1772 |
| 0.4625 | 6.0 | 4476 | 0.4885 | 0.1964 | 0.1894 | 0.2799 | 0.7201 | 55785 | 6875 | 3711 | 2448 | 0.1720 |
| 0.4416 | 7.0 | 5222 | 0.4898 | 0.1962 | 0.1890 | 0.2788 | 0.7212 | 55870 | 6819 | 3682 | 2522 | 0.1726 |
| 0.4287 | 8.0 | 5968 | 0.4894 | 0.1968 | 0.1894 | 0.2790 | 0.7210 | 55889 | 6804 | 3678 | 2580 | 0.1743 |
| 0.4457 | 9.0 | 6714 | 0.4909 | 0.1964 | 0.1891 | 0.2792 | 0.7208 | 55919 | 6858 | 3594 | 2586 | 0.1739 |
| 0.4068 | 10.0 | 7460 | 0.4926 | 0.1968 | 0.1894 | 0.2793 | 0.7207 | 55899 | 6836 | 3636 | 2590 | 0.1733 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu102
- Datasets 1.15.1
- Tokenizers 0.10.3
|
uclanlp/plbart-cs-java | fa09d595f1a3b9950cee749c13b0feaa2d08ad4e | 2021-11-09T17:08:03.000Z | [
"pytorch",
"plbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | uclanlp | null | uclanlp/plbart-cs-java | 1 | null | transformers | 30,428 | Entry not found |
uclanlp/plbart-multi_task-ruby | fa64c4ae5d694c20331834cf06857c623d6a0e45 | 2022-03-02T07:32:39.000Z | [
"pytorch",
"plbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | uclanlp | null | uclanlp/plbart-multi_task-ruby | 1 | null | transformers | 30,429 | Entry not found |
uclanlp/plbart-single_task-en_go | 7f519de25814cf4e42b848fa62e2e0efea50c2c1 | 2022-03-02T07:08:42.000Z | [
"pytorch",
"plbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | uclanlp | null | uclanlp/plbart-single_task-en_go | 1 | null | transformers | 30,430 | Entry not found |
uclanlp/plbart-single_task-en_js | 8002294481873d498f34ffae383abd3f74f675fb | 2022-03-02T07:10:01.000Z | [
"pytorch",
"plbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | uclanlp | null | uclanlp/plbart-single_task-en_js | 1 | null | transformers | 30,431 | Entry not found |
uclanlp/plbart-single_task-java_en | 3ec7e9e770cb2450fd2870a35b4c3dd0a3d04ae3 | 2022-03-02T06:57:42.000Z | [
"pytorch",
"plbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | uclanlp | null | uclanlp/plbart-single_task-java_en | 1 | null | transformers | 30,432 | Entry not found |
uclanlp/plbart-single_task-strong-generation | 7694c7b4382823b4aea57bc49faece3f95a146d1 | 2022-03-02T07:20:57.000Z | [
"pytorch",
"plbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | uclanlp | null | uclanlp/plbart-single_task-strong-generation | 1 | null | transformers | 30,433 | Entry not found |
uclanlp/visualbert-vqa-pre | 61b7a7df390dd946c119da9766848078ef6e463b | 2021-05-31T11:37:02.000Z | [
"pytorch",
"visual_bert",
"pretraining",
"transformers"
] | null | false | uclanlp | null | uclanlp/visualbert-vqa-pre | 1 | 1 | transformers | 30,434 | Entry not found |
ufal/byt5-small-multilexnorm2021-sl | befcb53cc6d5c373194be9315eafba133be3647e | 2021-10-20T12:49:57.000Z | [
"pytorch",
"t5",
"text2text-generation",
"sl",
"dataset:mc4",
"dataset:wikipedia",
"dataset:multilexnorm",
"arxiv:2105.13626",
"arxiv:1907.06292",
"transformers",
"lexical normalization",
"license:apache-2.0",
"autotrain_compatible"
] | text2text-generation | false | ufal | null | ufal/byt5-small-multilexnorm2021-sl | 1 | null | transformers | 30,435 | ---
language: sl
datasets:
- mc4
- wikipedia
- multilexnorm
tags:
- lexical normalization
license: apache-2.0
---
# Fine-tuned ByT5-small for MultiLexNorm (Slovenian version)

This is the official release of the fine-tuned models for **the winning entry** to the [*W-NUT 2021: Multilingual Lexical Normalization (MultiLexNorm)* shared task](https://noisy-text.github.io/2021/multi-lexnorm.html), which evaluates lexical-normalization systems on 12 social media datasets in 11 languages.
Our system is based on [ByT5](https://arxiv.org/abs/2105.13626), which we first pre-train on synthetic data and then fine-tune on authentic normalization data. It achieves the best performance by a wide margin in intrinsic evaluation, and also the best performance in extrinsic evaluation through dependency parsing. In addition to these fine-tuned models, we also release the source files on [GitHub](https://github.com/ufal/multilexnorm2021) and an interactive demo on [Google Colab](https://colab.research.google.com/drive/1rxpI8IlKk-D2crFqi2hdzbTBIezqgsCg?usp=sharing).
## How to use
The model was *not* fine-tuned in a standard sentence-to-sentence setting – instead, it was tailored to the token-to-token definition of MultiLexNorm data. Please refer to [**the interactive demo on Colab notebook**](https://colab.research.google.com/drive/1rxpI8IlKk-D2crFqi2hdzbTBIezqgsCg?usp=sharing) to learn how to use these models.
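For quick experimentation, a minimal loading sketch with Hugging Face Transformers is shown below. The raw-sentence input is only a placeholder to illustrate the `generate()` call; as noted above, real inputs must be built in the token-to-token format demonstrated in the Colab notebook.
```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

# Load the fine-tuned byte-level checkpoint (ByT5 has no subword vocabulary).
tokenizer = AutoTokenizer.from_pretrained("ufal/byt5-small-multilexnorm2021-sl")
model = T5ForConditionalGeneration.from_pretrained("ufal/byt5-small-multilexnorm2021-sl")

# Placeholder input: a noisy Slovenian snippet fed as plain text.
inputs = tokenizer("kva delaš", return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```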
## How to cite
```bibtex
@inproceedings{wnut-ufal,
title= "{ÚFAL} at {MultiLexNorm} 2021: Improving Multilingual Lexical Normalization by Fine-tuning {ByT5}",
author = "Samuel, David and Straka, Milan",
booktitle = "Proceedings of the 7th Workshop on Noisy User-generated Text (W-NUT 2021)",
year = "2021",
publisher = "Association for Computational Linguistics",
address = "Punta Cana, Dominican Republic"
}
```
## ByT5 - Small
ByT5 is a tokenizer-free version of [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) and generally follows the architecture of [MT5](https://huggingface.co/google/mt5-small).
ByT5 was only pre-trained on [mC4](https://www.tensorflow.org/datasets/catalog/c4#c4multilingual), excluding any supervised training, with an average span-mask of 20 UTF-8 characters. Therefore, this model has to be fine-tuned before it is usable on a downstream task.
ByT5 works especially well on noisy text data, *e.g.*, `google/byt5-small` significantly outperforms [mt5-small](https://huggingface.co/google/mt5-small) on [TweetQA](https://arxiv.org/abs/1907.06292).
Paper: [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626)
Authors: *Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel*
|
ughvom/britnayBOTMAIN | 674d6c5bbab9f02c2d0786b5628e6f8b7f1f6208 | 2022-01-09T19:32:32.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | ughvom | null | ughvom/britnayBOTMAIN | 1 | null | transformers | 30,436 | ---
tags:
- conversational
---
# britnayBOTMAIN Model |
umr55766/DialogGPT-small-peppa-pig | 608fbffa1145f9f82f2b0b8dd496a998e3267472 | 2021-08-30T17:08:23.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | umr55766 | null | umr55766/DialogGPT-small-peppa-pig | 1 | null | transformers | 30,437 | ---
tags:
- conversational
---
# Peppa Pig DialogGPT-small Model |
unicamp-dl/mt5-base-en-pt-msmarco-v2 | 129d263c50bc3cd1d0c6effe794c542ea3ad3ef5 | 2022-01-05T23:16:47.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"pt",
"dataset:msmarco",
"arxiv:2108.13897",
"transformers",
"msmarco",
"t5",
"tensorflow",
"pt-br",
"license:mit",
"autotrain_compatible"
] | text2text-generation | false | unicamp-dl | null | unicamp-dl/mt5-base-en-pt-msmarco-v2 | 1 | null | transformers | 30,438 | ---
language: pt
license: mit
tags:
- msmarco
- t5
- pytorch
- tensorflow
- pt
- pt-br
datasets:
- msmarco
widget:
- text: "Texto de exemplo em português"
inference: false
---
# mt5-base Reranker finetuned on mMARCO
## Introduction
mT5-base-en-pt-msmarco-v2 is an mT5-based model fine-tuned on a bilingual version of the MS MARCO passage dataset. This bilingual dataset is formed by the original MS MARCO dataset (in English) and a Portuguese translated version. In the v2 version, the Portuguese dataset was translated using Google Translate.
Further information about the dataset or the translation method can be found in our paper [**mMARCO: A Multilingual Version of MS MARCO Passage Ranking Dataset**](https://arxiv.org/abs/2108.13897) and in the [mMARCO](https://github.com/unicamp-dl/mMARCO) repository.
## Usage
```python
from transformers import T5Tokenizer, MT5ForConditionalGeneration
model_name = 'unicamp-dl/mt5-base-en-pt-msmarco-v2'
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = MT5ForConditionalGeneration.from_pretrained(model_name)
```
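As a rough illustration of reranking, the sketch below scores a query-passage pair by comparing the logits of two answer words for the first generated token. Both the prompt template and the "yes"/"no" target words are assumptions following a monoT5-style setup; the exact input format used during fine-tuning is documented in the [mMARCO](https://github.com/unicamp-dl/mMARCO) repository.
```python
import torch

query = "qual é a capital do Brasil?"
passage = "Brasília é a capital federal do Brasil desde 1960."

# Assumed monoT5-style prompt; check the mMARCO repository for the exact template.
text = f"Query: {query} Document: {passage} Relevant:"
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=1,
                         output_scores=True, return_dict_in_generate=True)

# Assumed target words; a relevant passage should put more mass on "yes".
yes_id = tokenizer.encode("yes", add_special_tokens=False)[0]
no_id = tokenizer.encode("no", add_special_tokens=False)[0]
probs = torch.softmax(out.scores[0][0, [no_id, yes_id]], dim=0)
print(f"relevance score: {probs[1].item():.3f}")
```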
# Citation
If you use mt5-base-en-pt-msmarco-v2, please cite:
@misc{bonifacio2021mmarco,
title={mMARCO: A Multilingual Version of MS MARCO Passage Ranking Dataset},
author={Luiz Henrique Bonifacio and Vitor Jeronymo and Hugo Queiroz Abonizio and Israel Campiotti and Marzieh Fadaee and Roberto Lotufo and Rodrigo Nogueira},
year={2021},
eprint={2108.13897},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
|
unicamp-dl/ptt5-base-pt-msmarco-10k-v1 | f849cb52d0140520cd3cfc85a24e092ae04fea21 | 2022-01-05T21:29:26.000Z | [
"pytorch",
"t5",
"text2text-generation",
"pt",
"dataset:msmarco",
"arxiv:2108.13897",
"transformers",
"msmarco",
"tensorflow",
"pt-br",
"license:mit",
"autotrain_compatible"
] | text2text-generation | false | unicamp-dl | null | unicamp-dl/ptt5-base-pt-msmarco-10k-v1 | 1 | null | transformers | 30,439 | ---
language: pt
license: mit
tags:
- msmarco
- t5
- pytorch
- tensorflow
- pt
- pt-br
datasets:
- msmarco
widget:
- text: "Texto de exemplo em português"
inference: false
---
# PTT5-base Reranker finetuned on Portuguese MS MARCO
## Introduction
ptt5-base-msmarco-pt-10k-v1 is a T5-based model pretrained on the BrWac corpus and fine-tuned on a Portuguese translated version of the MS MARCO passage dataset. In the v1 version, the Portuguese dataset was translated using the [Helsinki](https://huggingface.co/Helsinki-NLP) NMT model. This model was fine-tuned for 10k steps.
Further information about the dataset or the translation method can be found in our paper [**mMARCO: A Multilingual Version of MS MARCO Passage Ranking Dataset**](https://arxiv.org/abs/2108.13897) and in the [mMARCO](https://github.com/unicamp-dl/mMARCO) repository.
## Usage
```python
from transformers import T5Tokenizer, T5ForConditionalGeneration
model_name = 'unicamp-dl/ptt5-base-msmarco-pt-10k-v1'
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
```
# Citation
If you use ptt5-base-msmarco-pt-10k-v1, please cite:
@misc{bonifacio2021mmarco,
title={mMARCO: A Multilingual Version of MS MARCO Passage Ranking Dataset},
author={Luiz Henrique Bonifacio and Vitor Jeronymo and Hugo Queiroz Abonizio and Israel Campiotti and Marzieh Fadaee and Roberto Lotufo and Rodrigo Nogueira},
year={2021},
eprint={2108.13897},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
|
usami/electra-base-discriminator-finetuned-squad | 5e3311eaafbc4807b21829e0bea8452473e0ab3c | 2021-11-24T09:39:13.000Z | [
"pytorch",
"electra",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | usami | null | usami/electra-base-discriminator-finetuned-squad | 1 | null | transformers | 30,440 | Entry not found |
usernamtadejm/flairbookmodel1234 | 1d12e5a2b5e4903e5b21eaef7bd496f9bdc96676 | 2022-01-07T15:35:52.000Z | [
"pytorch",
"flair",
"token-classification"
] | token-classification | false | usernamtadejm | null | usernamtadejm/flairbookmodel1234 | 1 | null | flair | 30,441 | ---
tags:
- flair
- token-classification
widget:
- text: "does this work"
---
## Test model README
Some test README description
|
vachevkd/dg-t5sm-race-v01 | f40d44a72e9ace5032430c9bb621a1d214345a8e | 2021-12-20T20:00:59.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | vachevkd | null | vachevkd/dg-t5sm-race-v01 | 1 | null | transformers | 30,442 | Entry not found |
vaishnavi/indic-bert-512 | 5f753cca3ef0e6c4ec87f26aad82ed2026c550d3 | 2021-04-08T06:38:32.000Z | [
"pytorch",
"albert",
"en",
"dataset:AI4Bharat IndicNLP Corpora",
"transformers",
"license:mit"
] | null | false | vaishnavi | null | vaishnavi/indic-bert-512 | 1 | null | transformers | 30,443 | ---
language: en
license: mit
datasets:
- AI4Bharat IndicNLP Corpora
---
# IndicBERT
IndicBERT is a multilingual ALBERT model pretrained exclusively on 12 major Indian languages. It is pre-trained on our novel monolingual corpus of around 9 billion tokens and subsequently evaluated on a set of diverse tasks. IndicBERT has far fewer parameters than other multilingual models (mBERT, XLM-R, etc.) while achieving performance on par with or better than these models.
The 12 languages covered by IndicBERT are: Assamese, Bengali, English, Gujarati, Hindi, Kannada, Malayalam, Marathi, Oriya, Punjabi, Tamil, Telugu.
The code can be found [here](https://github.com/divkakwani/indic-bert). For more information, checkout our [project page](https://indicnlp.ai4bharat.org/) or our [paper](https://indicnlp.ai4bharat.org/papers/arxiv2020_indicnlp_corpus.pdf).
## Pretraining Corpus
We pre-trained indic-bert on AI4Bharat's monolingual corpus. The corpus has the following distribution of languages:
| Language | as | bn | en | gu | hi | kn | |
| ----------------- | ------ | ------ | ------ | ------ | ------ | ------ | ------- |
| **No. of Tokens** | 36.9M | 815M | 1.34B | 724M | 1.84B | 712M | |
| **Language** | **ml** | **mr** | **or** | **pa** | **ta** | **te** | **all** |
| **No. of Tokens** | 767M | 560M | 104M | 814M | 549M | 671M | 8.9B |
## Evaluation Results
IndicBERT is evaluated on IndicGLUE and some additional tasks. The results are summarized below. For more details about the tasks, refer to our [official repo](https://github.com/divkakwani/indic-bert).
#### IndicGLUE
Task | mBERT | XLM-R | IndicBERT
-----| ----- | ----- | ------
News Article Headline Prediction | 89.58 | 95.52 | **95.87**
Wikipedia Section Title Prediction| **73.66** | 66.33 | 73.31
Cloze-style multiple-choice QA | 39.16 | 27.98 | **41.87**
Article Genre Classification | 90.63 | 97.03 | **97.34**
Named Entity Recognition (F1-score) | **73.24** | 65.93 | 64.47
Cross-Lingual Sentence Retrieval Task | 21.46 | 13.74 | **27.12**
Average | 64.62 | 61.09 | **66.66**
#### Additional Tasks
Task | Task Type | mBERT | XLM-R | IndicBERT
-----| ----- | ----- | ------ | -----
BBC News Classification | Genre Classification | 60.55 | **75.52** | 74.60
IIT Product Reviews | Sentiment Analysis | 74.57 | **78.97** | 71.32
IITP Movie Reviews | Sentiment Analysis | 56.77 | **61.61** | 59.03
Soham News Article | Genre Classification | 80.23 | **87.6** | 78.45
Midas Discourse | Discourse Analysis | 71.20 | **79.94** | 78.44
iNLTK Headlines Classification | Genre Classification | 87.95 | 93.38 | **94.52**
ACTSA Sentiment Analysis | Sentiment Analysis | 48.53 | 59.33 | **61.18**
Winograd NLI | Natural Language Inference | 56.34 | 55.87 | **56.34**
Choice of Plausible Alternative (COPA) | Natural Language Inference | 54.92 | 51.13 | **58.33**
Amrita Exact Paraphrase | Paraphrase Detection | **93.81** | 93.02 | 93.75
Amrita Rough Paraphrase | Paraphrase Detection | 83.38 | 82.20 | **84.33**
Average | | 69.84 | **74.42** | 73.66
\* Note: all models have been restricted to a max_seq_length of 128.
## Downloads
The model can be downloaded [here](https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/models/indic-bert-v1.tar.gz). Both tf checkpoints and pytorch binaries are included in the archive. Alternatively, you can also download it from [Huggingface](https://huggingface.co/ai4bharat/indic-bert).
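A minimal loading sketch with Hugging Face Transformers is shown below; it assumes this checkpoint (vaishnavi/indic-bert-512) loads through the standard auto classes, as suggested by its ALBERT tags.
```python
import torch
from transformers import AutoTokenizer, AutoModel

# Assumption: the checkpoint loads with the standard ALBERT/auto classes.
tokenizer = AutoTokenizer.from_pretrained("vaishnavi/indic-bert-512")
model = AutoModel.from_pretrained("vaishnavi/indic-bert-512")

# Encode a Hindi sentence and take the first-token hidden state as a sentence vector.
inputs = tokenizer("भारत एक विशाल देश है", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
sentence_vector = outputs.last_hidden_state[:, 0]
print(sentence_vector.shape)
```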
## Citing
If you are using any of the resources, please cite the following article:
```
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
```
We would like to hear from you if:
- You are using our resources. Please let us know how you are putting these resources to use.
- You have any feedback on these resources.
## License
The IndicBERT code (and models) are released under the MIT License.
## Contributors
- Divyanshu Kakwani
- Anoop Kunchukuttan
- Gokul NC
- Satish Golla
- Avik Bhattacharyya
- Mitesh Khapra
- Pratyush Kumar
This work is the outcome of a volunteer effort as part of [AI4Bharat initiative](https://ai4bharat.org).
## Contact
- Anoop Kunchukuttan ([[email protected]](mailto:[email protected]))
- Mitesh Khapra ([[email protected]](mailto:[email protected]))
- Pratyush Kumar ([[email protected]](mailto:[email protected]))
|
valeriazen/ruT5-base-finetuned-xsum | 16c05b534d2cfc77907650c7f8581311f071b449 | 2022-01-18T17:07:48.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | valeriazen | null | valeriazen/ruT5-base-finetuned-xsum | 1 | null | transformers | 30,444 | Entry not found |
valeriulacatusu/distilbert-base-uncased-finetuned-ner | 204231d2eee5c3a81a643930820504ed8ea04b0b | 2021-12-21T14:44:24.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"token-classification",
"transformers",
"autotrain_compatible"
] | token-classification | false | valeriulacatusu | null | valeriulacatusu/distilbert-base-uncased-finetuned-ner | 1 | null | transformers | 30,445 | Entry not found |
valhalla/cogview-vqvae-test | cd3cf9f3b20baf18bd702f3ff7374bbf53c72cef | 2021-06-21T07:09:01.000Z | [
"pytorch",
"cog_view",
"transformers"
] | null | false | valhalla | null | valhalla/cogview-vqvae-test | 1 | null | transformers | 30,446 | Entry not found |
valhalla/s2t_covost2_en_de_small | e41dbdadc2156479ff5905d96a2abb4b1a557681 | 2021-02-24T07:22:30.000Z | [
"pytorch",
"speech_to_text_transformer",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | valhalla | null | valhalla/s2t_covost2_en_de_small | 1 | null | transformers | 30,447 | Entry not found |
varun3dec/Pbi-Summarization-model | 989b30bc2eb16715877d48c00b7a752e4f80b210 | 2022-01-10T07:09:54.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | varun3dec | null | varun3dec/Pbi-Summarization-model | 1 | null | transformers | 30,448 | |
vdivya/wav2vec2-base-timit-demo-colab | 7ba031b54425313960e43c8deaade5154469245d | 2022-01-03T09:51:04.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | vdivya | null | vdivya/wav2vec2-base-timit-demo-colab | 1 | null | transformers | 30,449 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-timit-demo-colab
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-timit-demo-colab
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4630
- Wer: 0.3399
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 3.4454 | 4.0 | 500 | 1.2920 | 0.9381 |
| 0.5869 | 8.0 | 1000 | 0.4634 | 0.4297 |
| 0.2216 | 12.0 | 1500 | 0.4481 | 0.3778 |
| 0.1283 | 16.0 | 2000 | 0.4651 | 0.3741 |
| 0.0872 | 20.0 | 2500 | 0.4762 | 0.3548 |
| 0.0635 | 24.0 | 3000 | 0.4495 | 0.3513 |
| 0.0482 | 28.0 | 3500 | 0.4630 | 0.3399 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.13.3
- Tokenizers 0.10.3
|
versae/mt5-base-finetuned-modernisa | d807ca7f3c8a6c21fcc5abd722afe0803cbc6ee6 | 2022-07-20T10:25:19.000Z | [
"pytorch",
"tensorboard",
"mt5",
"text2text-generation",
"dataset:versae/modernisa",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | versae | null | versae/mt5-base-finetuned-modernisa | 1 | 1 | transformers | 30,450 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- bleu
datasets:
- versae/modernisa
model-index:
- name: mt5-base-finetuned-modernisa
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mt5-base-finetuned-modernisa
This model is a fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3179
- Bleu: 81.9164
- Gen Len: 11.1876
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|
| 0.4588 | 0.35 | 10000 | 0.4023 | 78.1616 | 11.1577 |
| 0.3982 | 0.71 | 20000 | 0.3584 | 79.3456 | 11.144 |
| 0.3465 | 1.06 | 30000 | 0.3424 | 80.4057 | 11.1625 |
| 0.3236 | 1.42 | 40000 | 0.3349 | 80.9978 | 11.1869 |
| 0.2983 | 1.77 | 50000 | 0.3243 | 81.5426 | 11.1925 |
| 0.278 | 2.13 | 60000 | 0.3210 | 81.794 | 11.2047 |
| 0.2584 | 2.48 | 70000 | 0.3205 | 81.8086 | 11.1986 |
| 0.2609 | 2.84 | 80000 | 0.3179 | 81.9164 | 11.1876 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.15.2.dev0
- Tokenizers 0.10.3
|
vesteinn/XLMr-ENIS-QA-IsQ-EnA | edbb89323deefc3041b82352590d7fa142c2a27c | 2021-09-27T22:09:30.000Z | [
"pytorch",
"xlm-roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | vesteinn | null | vesteinn/XLMr-ENIS-QA-IsQ-EnA | 1 | null | transformers | 30,451 | ---
language:
- is
- en
thumbnail:
tags:
- icelandic
- qa
license:
datasets:
- ic3
- igc
metrics:
- em
- f1
widget:
- text: "Hverrar trúar var Halldór Laxness ?"
context: "Halldór Kiljan Laxness was born in 1902 in Reykjavik , the capital of Iceland , but spent his youth in the country . From the age of seventeen on , he travelled and lived abroad , chiefly on the European continent . He was influenced by expressionism and other modern currents in Germany and France . In the mid-twenties he was converted to Catholicism ; his spiritual experiences are reflected in several books of an autobiographical nature , chiefly Undir Helgahnúk ( Under the Holy Mountain ) , 1924 . In 1927 , he published his first important novel , Vefarinn mikli frá Kasmír ( The Great Weaver from Kashmir ) . Laxness’s religious period did not last long ; during a visit to America he became attracted to socialism . Alþydubókin ( The Book of the People ) , 1929 , is evidence of a change toward a socialist outlook . In 1930 , Laxness settled in Iceland . Laxness’s main achievement consists of three novel cycles written during the thirties , dealing with the people of Iceland . Þú vínviður hreini , 1931 , and Fuglinn í fjörunni , 1932 , ( both translated as Salka Valka ) , tell the story of a poor fisher girl ; Sjálfstætt fólk ( Independent People ) , 1934 - 35 , treats the fortunes of small farmers , whereas the tetralogy Ljós heimsins ( The Light of the World ) , 1937 - 40 , has as its hero an Icelandic folk poet . Laxness’s later works are frequently historical and influenced by the saga tradition : Íslandsklukkan ( The Bell of Iceland ) , 1943 - 46 , Gerpla ( The Happy Warriors ) , 1952 , and Paradísarheimt ( Paradise Reclaimed ) , 1960 . Laxness is also the author of the topical and sharply polemical Atómstöðin ( The Atom Station ) , 1948 ."
---
# XLMr-ENIS-QA-IsQ-EnA
## Model description
This is an Icelandic reading comprehension Q&A model.
## Intended uses & limitations
This model is part of my MSc thesis about Q&A for Icelandic.
#### How to use
```python
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
tokenizer = AutoTokenizer.from_pretrained("vesteinn/IceBERT-QA")
model = AutoModelForQuestionAnswering.from_pretrained("vesteinn/IceBERT-QA")
```
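The loaded model and tokenizer can then be wrapped in a standard question-answering pipeline; the sketch below is an illustrative example that uses the widget question from this card against a shortened English context.
```python
from transformers import pipeline

# Extractive QA: predict the answer span inside the given context.
qa = pipeline("question-answering", model=model, tokenizer=tokenizer)

question = "Hverrar trúar var Halldór Laxness ?"
context = (
    "Halldór Kiljan Laxness was born in 1902 in Reykjavik , the capital of Iceland . "
    "In the mid-twenties he was converted to Catholicism ; his spiritual experiences "
    "are reflected in several books of an autobiographical nature ."
)
print(qa(question=question, context=context))
```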
#### Limitations and bias
## Training data
Translated English datasets were used along with the Natural Questions in Icelandic dataset.
## Training procedure
## Eval results
### BibTeX entry and citation info
```bibtex
```
|
vesteinn/open-qa-icelandic-densephrases | 9c4edf656c25cd1df44cce15bd7b0d346c85fd84 | 2021-09-30T10:35:31.000Z | [
"pytorch",
"xlm-roberta",
"transformers"
] | null | false | vesteinn | null | vesteinn/open-qa-icelandic-densephrases | 1 | null | transformers | 30,452 | Entry not found |
vinko/shitposting_AI | e83abecb7c0d7729ddd2a1d12c43f67ad0c697f0 | 2022-01-18T13:07:53.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | vinko | null | vinko/shitposting_AI | 1 | null | transformers | 30,453 | Entry not found |
vionwinnie/t5-reddit | 2d017e49c33a9a6e709eb3b3ddf93374395cc6ce | 2021-07-07T08:15:48.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | vionwinnie | null | vionwinnie/t5-reddit | 1 | 1 | transformers | 30,454 | This T5 small model was fine-tuned on Reddit data.
It has two subtasks:
1. title generation
2. tag classification
|
vocab-transformers/dense_encoder-msmarco-distilbert-word2vec256k-MLM_210k_emb_updated | 336c0b6088cc26fcab7239d66c80eb34bf7093aa | 2022-02-21T20:13:56.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | vocab-transformers | null | vocab-transformers/dense_encoder-msmarco-distilbert-word2vec256k-MLM_210k_emb_updated | 1 | null | sentence-transformers | 30,455 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# dense_encoder-msmarco-distilbert-word2vec256k-MLM_210k
**Note: Token embeddings were updated!**
This model is based on [vocab-transformers/msmarco-distilbert-word2vec256k-MLM_210k](https://huggingface.co/vocab-transformers/msmarco-distilbert-word2vec256k-MLM_210k), which has a 256k-sized vocabulary initialized with word2vec and was trained with MLM for 210k steps.
It has been trained on MS MARCO using [MarginMSELoss](https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/ms_marco/train_bi-encoder_margin-mse.py). See the train_script.py in this repository.
Performance:
- MS MARCO dev: 34.91 (MRR@10)
- TREC-DL 2019: 67.56 (nDCG@10)
- TREC-DL 2020: 68.18 (nDCG@10)
## Usage (Sentence-Transformers)
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('vocab-transformers/dense_encoder-msmarco-distilbert-word2vec256k-MLM_210k_emb_updated')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('vocab-transformers/dense_encoder-msmarco-distilbert-word2vec256k-MLM_210k_emb_updated')
model = AutoModel.from_pretrained('vocab-transformers/dense_encoder-msmarco-distilbert-word2vec256k-MLM_210k_emb_updated')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 7858 with parameters:
```
{'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.MarginMSELoss.MarginMSELoss`
Parameters of the fit()-Method:
```
{
"epochs": 30,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": null,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
vocab-transformers/msmarco-distilbert-word2vec256k-MLM_210k_emb_updated | 03578fe76922f13abf1d8ccf5bda6ee4fe83ecbb | 2022-02-21T20:12:32.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | vocab-transformers | null | vocab-transformers/msmarco-distilbert-word2vec256k-MLM_210k_emb_updated | 1 | null | transformers | 30,456 | # Model
This model is based on [nicoladecao/msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k sized vocabulary initialized with word2vec.
This model has been trained with MLM on the MS MARCO corpus collection for 210k steps. See train_mlm.py for the train script. It was run on 2x V100 GPUs.
**Note: Token embeddings were updated!**
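Since the checkpoint was trained with masked language modeling, a minimal fill-mask sketch (an illustrative example, not part of the original training setup) looks like this:
```python
from transformers import pipeline

# DistilBERT MLM head with a 256k word2vec-initialized vocabulary.
fill = pipeline(
    "fill-mask",
    model="vocab-transformers/msmarco-distilbert-word2vec256k-MLM_210k_emb_updated",
)

masked = f"The capital of France is {fill.tokenizer.mask_token}."
for pred in fill(masked, top_k=3):
    print(pred["token_str"], round(pred["score"], 3))
```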
|
vocab-transformers/msmarco-distilbert-word2vec256k-MLM_445k_emb_updated | 7b8f2c0865874a1db14688b860af694dde703da7 | 2022-02-21T20:12:37.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | vocab-transformers | null | vocab-transformers/msmarco-distilbert-word2vec256k-MLM_445k_emb_updated | 1 | null | transformers | 30,457 | # Model
This model is based on [nicoladecao/msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k sized vocabulary initialized with word2vec.
This model has been trained with MLM on the MS MARCO corpus collection for 445k steps. See train_mlm.py for the train script. It was run on 2x V100 GPUs.
**Note: Token embeddings were updated!** |
voidful/bart_base_cnndm | 28ec401fba1ea98d9ad1f3eec6222418efbc037f | 2021-10-27T08:49:10.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | voidful | null | voidful/bart_base_cnndm | 1 | null | transformers | 30,458 | Entry not found |
voidful/bart_base_squad_cq_a | 081a618aa7e71cc74f8b5d2956cfe411e7885e78 | 2021-07-04T16:27:53.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | voidful | null | voidful/bart_base_squad_cq_a | 1 | null | transformers | 30,459 | Entry not found |
voidful/phoneme_bart_base | 0fae7c4e82f8d936b2bf2c12924262931bc3f49e | 2022-02-21T06:32:07.000Z | [
"pytorch",
"bart",
"feature-extraction",
"transformers"
] | feature-extraction | false | voidful | null | voidful/phoneme_bart_base | 1 | null | transformers | 30,460 | Entry not found |
vppvgit/BiblItBERT-1 | 6db466ee095bed01f992ab3086ca6183400c99f9 | 2021-09-27T09:40:47.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | fill-mask | false | vppvgit | null | vppvgit/BiblItBERT-1 | 1 | null | transformers | 30,461 | ---
tags:
- generated_from_trainer
datasets:
- null
model-index:
- name: BiblItBERT-1
results:
- task:
name: Masked Language Modeling
type: fill-mask
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BiblItBERT-1
This model is a fine-tuned version of [vppvgit/BiblItBERT](https://huggingface.co/vppvgit/BiblItBERT) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7775
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:------:|:---------------:|
| 1.5764 | 1.0 | 16528 | 1.5214 |
| 1.4572 | 2.0 | 33056 | 1.4201 |
| 1.3787 | 3.0 | 49584 | 1.3728 |
| 1.3451 | 4.0 | 66112 | 1.3245 |
| 1.3066 | 5.0 | 82640 | 1.2614 |
| 1.2447 | 6.0 | 99168 | 1.2333 |
| 1.2172 | 7.0 | 115696 | 1.2149 |
| 1.2079 | 8.0 | 132224 | 1.1853 |
| 1.2167 | 9.0 | 148752 | 1.1586 |
| 1.2056 | 10.0 | 165280 | 1.1503 |
| 1.1307 | 11.0 | 181808 | 1.1224 |
| 1.1689 | 12.0 | 198336 | 1.1074 |
| 1.1007 | 13.0 | 214864 | 1.0924 |
| 1.0901 | 14.0 | 231392 | 1.0659 |
| 1.0667 | 15.0 | 247920 | 1.0650 |
| 1.0434 | 16.0 | 264448 | 1.0362 |
| 1.0333 | 17.0 | 280976 | 1.0250 |
| 1.0342 | 18.0 | 297504 | 1.0198 |
| 1.0059 | 19.0 | 314032 | 0.9950 |
| 0.9719 | 20.0 | 330560 | 0.9836 |
| 0.9863 | 21.0 | 347088 | 0.9873 |
| 0.9781 | 22.0 | 363616 | 0.9724 |
| 0.9369 | 23.0 | 380144 | 0.9599 |
| 0.9578 | 24.0 | 396672 | 0.9557 |
| 0.9253 | 25.0 | 413200 | 0.9400 |
| 0.9441 | 26.0 | 429728 | 0.9222 |
| 0.9138 | 27.0 | 446256 | 0.9140 |
| 0.882 | 28.0 | 462784 | 0.9045 |
| 0.864 | 29.0 | 479312 | 0.8880 |
| 0.8632 | 30.0 | 495840 | 0.9023 |
| 0.8342 | 32.0 | 528896 | 0.8740 |
| 0.8037 | 34.0 | 561952 | 0.8647 |
| 0.8119 | 37.0 | 611536 | 0.8358 |
| 0.8011 | 38.0 | 628064 | 0.8252 |
| 0.786 | 39.0 | 644592 | 0.8228 |
| 0.7697 | 41.0 | 677648 | 0.8138 |
| 0.7485 | 42.0 | 694176 | 0.8104 |
| 0.7689 | 43.0 | 710704 | 0.8018 |
| 0.7401 | 45.0 | 743760 | 0.7957 |
| 0.7031 | 47.0 | 776816 | 0.7726 |
| 0.7578 | 48.0 | 793344 | 0.7864 |
| 0.7298 | 49.0 | 809872 | 0.7775 |
### Framework versions
- Transformers 4.10.3
- Pytorch 1.9.0+cu102
- Datasets 1.12.1
- Tokenizers 0.10.3
|
vppvgit/BiblItBERT | d4b82537930506124f4105dc06100afac515e30c | 2021-09-20T17:47:46.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | vppvgit | null | vppvgit/BiblItBERT | 1 | null | transformers | 30,462 | Entry not found |
vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt | 592f522eaa3c800b2c1c185c78f107088d873939 | 2022-01-09T03:25:27.000Z | [
"pytorch",
"onnx",
"bert",
"transformers"
] | null | false | vuiseng9 | null | vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt | 1 | null | transformers | 30,463 | This model is a downstream optimization of [```vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt```](https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt) using [OpenVINO/NNCF](https://github.com/openvinotoolkit/nncf). Applied optimization includes:
1. magnitude sparsification at 50% upon initialization. Parameters are ranked globally via their absolute norm. Only linear layers of self-attention and FFNN are targeted (a toy ranking sketch follows the results below).
2. NNCF Quantize-Aware Training - Symmetric 8-bit for both weight and activation on all learnable layers.
3. Custom distillation with large model ```bert-large-uncased-whole-word-masking-finetuned-squad```
```
eval_exact_match = 80.2081
eval_f1 = 87.5921
eval_samples = 10784
```
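The global magnitude ranking in item 1 can be pictured with a toy sketch in plain PyTorch; this is only an illustration of the idea, not the actual NNCF sparsification code.
```python
import torch
import torch.nn as nn

def global_magnitude_prune(linears, sparsity=0.5):
    """Zero the smallest-|w| parameters across all given linear layers (global ranking)."""
    all_w = torch.cat([m.weight.detach().abs().flatten() for m in linears])
    threshold = torch.quantile(all_w, sparsity)      # 0.5 -> median absolute value
    for m in linears:
        mask = (m.weight.detach().abs() > threshold).to(m.weight.dtype)
        m.weight.data.mul_(mask)                     # apply the binary mask in place

# Toy stand-ins for the targeted self-attention / FFNN linear layers.
layers = [nn.Linear(768, 768), nn.Linear(768, 3072), nn.Linear(3072, 768)]
global_magnitude_prune(layers, sparsity=0.5)
```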
# Setup
```bash
# OpenVINO/NNCF
git clone https://github.com/vuiseng9/nncf && cd nncf
git checkout tld-poc
git reset --hard 1dec7afe7a4b567c059fcf287ea2c234980fded2
python setup.py develop
pip install -r examples/torch/requirements.txt
# Huggingface nn_pruning
git clone https://github.com/vuiseng9/nn_pruning && cd nn_pruning
git checkout reproduce-evaluation
git reset --hard 2d4e196d694c465e43e5fbce6c3836d0a60e1446
pip install -e ".[dev]"
# Huggingface Transformers
git clone https://github.com/vuiseng9/transformers && cd transformers
git checkout tld-poc
git reset --hard 10a1e29d84484e48fd106f58957d9ffc89dc43c5
pip install -e .
head -n 1 examples/pytorch/question-answering/requirements.txt | xargs -i pip install {}
# Additional dependencies
pip install onnx
```
# Train
```bash
git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt
BASE_MODEL=/path/to/cloned_repo_above #to-revise
wget https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt/raw/main/nncf_bert_squad_sparsity.json
NNCF_CFG=/path/to/downloaded_nncf_cfg_above #to-revise
OUTROOT=/path/to/train_output_root #to-revise
WORKDIR=transformers/examples/pytorch/question-answering #to-revise
RUNID=bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt
cd $WORKDIR
OUTDIR=$OUTROOT/$RUNID
mkdir -p $OUTDIR
export CUDA_VISIBLE_DEVICES=0
NEPOCH=5
python run_qa.py \
--model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \
--optimize_model_before_eval \
--optimized_checkpoint $BASE_MODEL \
--dataset_name squad \
--do_eval \
--do_train \
--evaluation_strategy steps \
--eval_steps 250 \
--learning_rate 3e-5 \
--lr_scheduler_type cosine_with_restarts \
--warmup_ratio 0.25 \
--cosine_cycles 1 \
--teacher bert-large-uncased-whole-word-masking-finetuned-squad \
--teacher_ratio 0.9 \
--num_train_epochs $NEPOCH \
--per_device_eval_batch_size 128 \
--per_device_train_batch_size 16 \
--max_seq_length 384 \
--doc_stride 128 \
--save_steps 250 \
--nncf_config $NNCF_CFG \
--logging_steps 1 \
--overwrite_output_dir \
--run_name $RUNID \
--output_dir $OUTDIR
```
# Eval
This repo must be cloned locally.
```bash
git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt
MODELROOT=/path/to/cloned_repo_above #to-revise
export CUDA_VISIBLE_DEVICES=0
OUTDIR=eval-bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt
WORKDIR=transformers/examples/pytorch/question-answering #to-revise
cd $WORKDIR
mkdir $OUTDIR
nohup python run_qa.py \
--model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \
--dataset_name squad \
--optimize_model_before_eval \
--qat_checkpoint $MODELROOT/checkpoint-26250 \
--nncf_config $MODELROOT/nncf_bert_squad_sparsity.json \
--to_onnx $OUTDIR/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt.onnx \
--do_eval \
--per_device_eval_batch_size 128 \
--max_seq_length 384 \
--doc_stride 128 \
--overwrite_output_dir \
--output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log &
```
|
vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt | 03c83741f6b8dc1eaa90d7917df88e5a69ebf53e | 2022-01-09T03:14:14.000Z | [
"pytorch",
"onnx",
"bert",
"transformers"
] | null | false | vuiseng9 | null | vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt | 1 | null | transformers | 30,464 | This model is a downstream optimization of [```vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt```](https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt) using [OpenVINO/NNCF](https://github.com/openvinotoolkit/nncf). Applied optimization includes:
1. magnitude sparsification at 60% upon initialization. Parameters are ranked globally via their absolute norm. Only linear layers of self-attention and FFNN are targeted.
2. NNCF Quantize-Aware Training - Symmetric 8-bit for both weight and activation on all learnable layers.
3. Custom distillation with large model ```bert-large-uncased-whole-word-masking-finetuned-squad```
```
eval_exact_match = 80.3122
eval_f1 = 87.6162
eval_samples = 10784
```
# Setup
```bash
# OpenVINO/NNCF
git clone https://github.com/vuiseng9/nncf && cd nncf
git checkout tld-poc
git reset --hard 1dec7afe7a4b567c059fcf287ea2c234980fded2
python setup.py develop
pip install -r examples/torch/requirements.txt
# Huggingface nn_pruning
git clone https://github.com/vuiseng9/nn_pruning && cd nn_pruning
git checkout reproduce-evaluation
git reset --hard 2d4e196d694c465e43e5fbce6c3836d0a60e1446
pip install -e ".[dev]"
# Huggingface Transformers
git clone https://github.com/vuiseng9/transformers && cd transformers
git checkout tld-poc
git reset --hard 10a1e29d84484e48fd106f58957d9ffc89dc43c5
pip install -e .
head -n 1 examples/pytorch/question-answering/requirements.txt | xargs -i pip install {}
# Additional dependencies
pip install onnx
```
# Train
```bash
git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt
BASE_MODEL=/path/to/cloned_repo_above #to-revise
wget https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt/raw/main/nncf_bert_squad_sparsity.json
NNCF_CFG=/path/to/downloaded_nncf_cfg_above #to-revise
OUTROOT=/path/to/train_output_root #to-revise
WORKDIR=transformers/examples/pytorch/question-answering #to-revise
RUNID=bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt
cd $WORKDIR
OUTDIR=$OUTROOT/$RUNID
mkdir -p $OUTDIR
export CUDA_VISIBLE_DEVICES=0
NEPOCH=5
python run_qa.py \
--model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \
--optimize_model_before_eval \
--optimized_checkpoint $BASE_MODEL \
--dataset_name squad \
--do_eval \
--do_train \
--evaluation_strategy steps \
--eval_steps 250 \
--learning_rate 3e-5 \
--lr_scheduler_type cosine_with_restarts \
--warmup_ratio 0.25 \
--cosine_cycles 1 \
--teacher bert-large-uncased-whole-word-masking-finetuned-squad \
--teacher_ratio 0.9 \
--num_train_epochs $NEPOCH \
--per_device_eval_batch_size 128 \
--per_device_train_batch_size 16 \
--max_seq_length 384 \
--doc_stride 128 \
--save_steps 250 \
--nncf_config $NNCF_CFG \
--logging_steps 1 \
--overwrite_output_dir \
--run_name $RUNID \
--output_dir $OUTDIR
```
# Eval
This repo must be cloned locally.
```bash
git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt
MODELROOT=/path/to/cloned_repo_above #to-revise
export CUDA_VISIBLE_DEVICES=0
OUTDIR=eval-bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt
WORKDIR=transformers/examples/pytorch/question-answering #to-revise
cd $WORKDIR
mkdir $OUTDIR
nohup python run_qa.py \
--model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \
--dataset_name squad \
--optimize_model_before_eval \
--qat_checkpoint $MODELROOT/checkpoint-22000 \
--nncf_config $MODELROOT/nncf_bert_squad_sparsity.json \
--to_onnx $OUTDIR/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt.onnx \
--do_eval \
--per_device_eval_batch_size 128 \
--max_seq_length 384 \
--doc_stride 128 \
--overwrite_output_dir \
--output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log &
```
|
vuiseng9/bert-base-squadv1-pruneofa-90pc-bt-qat-lt | 7bf8dffa024ab05b80105e2f05cd525e04500bda | 2022-01-19T19:13:40.000Z | [
"pytorch",
"onnx",
"bert",
"transformers"
] | null | false | vuiseng9 | null | vuiseng9/bert-base-squadv1-pruneofa-90pc-bt-qat-lt | 1 | null | transformers | 30,465 | This model is a downstream optimization of [```vuiseng9/bert-base-squadv1-pruneofa-90pc-bt```](https://huggingface.co/vuiseng9/bert-base-squadv1-pruneofa-90pc-bt) using [OpenVINO/NNCF](https://github.com/openvinotoolkit/nncf). Applied optimization includes:
1. magnitude sparsification at 0% upon initialization. Custom reverse masking and sparsity freezing are applied.
2. NNCF Quantize-Aware Training - Symmetric 8-bit for both weight and activation on all learnable layers.
3. Custom distillation with large model ```bert-large-uncased-whole-word-masking-finetuned-squad```
```
eval_exact_match = 80.6623
eval_f1 = 87.7147
eval_samples = 10784
```
# Setup
```bash
# OpenVINO/NNCF
git clone https://github.com/vuiseng9/nncf && cd nncf
git checkout tld-poc
git reset --hard 5647610d5ee2bf9f1324604e6579bca1c391e260
python setup.py develop
pip install -r examples/torch/requirements.txt
# Huggingface nn_pruning
git clone https://github.com/vuiseng9/nn_pruning && cd nn_pruning
git checkout reproduce-evaluation
git reset --hard 2d4e196d694c465e43e5fbce6c3836d0a60e1446
pip install -e ".[dev]"
# Huggingface Transformers
git clone https://github.com/vuiseng9/transformers && cd transformers
git checkout tld-poc
git reset --hard 5dd7402e9a316041dea4ff67508c01047323616e
pip install -e .
head -n 1 examples/pytorch/question-answering/requirements.txt | xargs -i pip install {}
# Additional dependencies
pip install onnx
```
# Train
```bash
wget https://huggingface.co/vuiseng9/bert-base-squadv1-pruneofa-90pc-bt-qat-lt/raw/main/nncf_bert_squad_sparsity.json
NNCF_CFG=/path/to/downloaded_nncf_cfg_above #to-revise
OUTROOT=/path/to/train_output_root #to-revise
WORKDIR=transformers/examples/pytorch/question-answering #to-revise
RUNID=bert-base-squadv1-pruneofa-90pc-bt-qat-lt
cd $WORKDIR
OUTDIR=$OUTROOT/$RUNID
mkdir -p $OUTDIR
export CUDA_VISIBLE_DEVICES=0
NEPOCH=5
python run_qa.py \
--model_name_or_path vuiseng9/bert-base-squadv1-pruneofa-90pc-bt \
--pruneofa_qat \
--dataset_name squad \
--do_eval \
--do_train \
--evaluation_strategy steps \
--eval_steps 250 \
--learning_rate 3e-5 \
--lr_scheduler_type cosine_with_restarts \
--warmup_ratio 0.25 \
--cosine_cycles 1 \
--teacher bert-large-uncased-whole-word-masking-finetuned-squad \
--teacher_ratio 0.9 \
--num_train_epochs $NEPOCH \
--per_device_eval_batch_size 128 \
--per_device_train_batch_size 16 \
--max_seq_length 384 \
--doc_stride 128 \
--save_steps 250 \
--nncf_config $NNCF_CFG \
--logging_steps 1 \
--overwrite_output_dir \
--run_name $RUNID \
--output_dir $OUTDIR
```
# Eval
This repo must be cloned locally.
```bash
git clone https://huggingface.co/vuiseng9/bert-base-squadv1-pruneofa-90pc-bt-qat-lt
MODELROOT=/path/to/cloned_repo_above #to-revise
export CUDA_VISIBLE_DEVICES=0
OUTDIR=eval-bert-base-squadv1-pruneofa-90pc-bt-qat-lt
WORKDIR=transformers/examples/pytorch/question-answering #to-revise
cd $WORKDIR
mkdir $OUTDIR
nohup python run_qa.py \
--model_name_or_path vuiseng9/bert-base-squadv1-pruneofa-90pc-bt \
--dataset_name squad \
--qat_checkpoint $MODELROOT/checkpoint-22000 \
--nncf_config $MODELROOT/nncf_bert_squad_sparsity.json \
--to_onnx $OUTDIR/bert-base-squadv1-pruneofa-90pc-bt-qat-lt.onnx \
--do_eval \
--per_device_eval_batch_size 128 \
--max_seq_length 384 \
--doc_stride 128 \
--overwrite_output_dir \
--output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log &
```
|
vuiseng9/bert-base-squadv1-qat-bt | 1ab83ad07ccb90df7acdb59b96ea0e19e54cc17e | 2022-01-19T19:09:40.000Z | [
"pytorch",
"onnx",
"bert",
"transformers"
] | null | false | vuiseng9 | null | vuiseng9/bert-base-squadv1-qat-bt | 1 | null | transformers | 30,466 | This model is a quantized-aware transfer learning of bert-base-uncased on Squadv1 using [OpenVINO/NNCF](https://github.com/openvinotoolkit/nncf). Applied optimization includes:
1. NNCF Quantize-Aware Training - Symmetric 8-bit for both weight and activation on all learnable layers (a toy fake-quantization sketch follows the results below).
2. Custom distillation with fine-tuned model [```csarron/bert-base-uncased-squad-v1```](https://huggingface.co/csarron/bert-base-uncased-squad-v1)
```
eval_exact_match = 80.8136
eval_f1 = 88.2594
eval_samples = 10784
```
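The symmetric 8-bit scheme in item 1 can be pictured with a toy fake-quantization sketch in plain PyTorch; this is only an illustration, not the actual NNCF quantizer.
```python
import torch

def fake_quant_sym8(x: torch.Tensor) -> torch.Tensor:
    """Symmetric 8-bit fake quantization: round to the int8 grid, then dequantize."""
    scale = x.abs().max() / 127.0                      # per-tensor scale, zero-point fixed at 0
    q = torch.clamp(torch.round(x / scale), -127, 127)
    return q * scale                                   # values the QAT forward pass would see

w = torch.randn(768, 768)
w_q = fake_quant_sym8(w)
print((w - w_q).abs().max())                           # quantization error is bounded by ~scale/2
```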
# Setup
```bash
# OpenVINO/NNCF
git clone https://github.com/vuiseng9/nncf && cd nncf
git checkout tld-poc
git reset --hard 1dec7afe7a4b567c059fcf287ea2c234980fded2
python setup.py develop
pip install -r examples/torch/requirements.txt
# Huggingface nn_pruning
git clone https://github.com/vuiseng9/nn_pruning && cd nn_pruning
git checkout reproduce-evaluation
git reset --hard 2d4e196d694c465e43e5fbce6c3836d0a60e1446
pip install -e ".[dev]"
# Huggingface Transformers
git clone https://github.com/vuiseng9/transformers && cd transformers
git checkout tld-poc
git reset --hard 10a1e29d84484e48fd106f58957d9ffc89dc43c5
pip install -e .
head -n 1 examples/pytorch/question-answering/requirements.txt | xargs -i pip install {}
# Additional dependencies
pip install onnx
```
# Train
```bash
wget https://huggingface.co/vuiseng9/bert-base-squadv1-qat-bt/raw/main/nncf_bert_squad_qat.json
NNCF_CFG=/path/to/downloaded_nncf_cfg_above #to-revise
OUTROOT=/path/to/train_output_root #to-revise
WORKDIR=transformers/examples/pytorch/question-answering #to-revise
RUNID=bert-base-squadv1-qat-bt
cd $WORKDIR
OUTDIR=$OUTROOT/$RUNID
mkdir -p $OUTDIR
export CUDA_VISIBLE_DEVICES=0
NEPOCH=2
python run_qa.py \
--model_name_or_path bert-base-uncased \
--dataset_name squad \
--do_eval \
--do_train \
--evaluation_strategy steps \
--eval_steps 250 \
--learning_rate 3e-5 \
--lr_scheduler_type cosine_with_restarts \
--warmup_ratio 0.25 \
--cosine_cycles 1 \
--teacher csarron/bert-base-uncased-squad-v1 \
--teacher_ratio 0.9 \
--num_train_epochs $NEPOCH \
--per_device_eval_batch_size 128 \
--per_device_train_batch_size 16 \
--max_seq_length 384 \
--doc_stride 128 \
--save_steps 250 \
--nncf_config $NNCF_CFG \
--logging_steps 1 \
--overwrite_output_dir \
--run_name $RUNID \
--output_dir $OUTDIR
```
# Eval
This repo must be cloned locally.
```bash
git clone https://huggingface.co/vuiseng9/bert-base-squadv1-qat-bt
MODELROOT=/path/to/cloned_repo_above #to-revise
export CUDA_VISIBLE_DEVICES=0
OUTDIR=eval-bert-base-squadv1-qat-bt
WORKDIR=transformers/examples/pytorch/question-answering #to-revise
cd $WORKDIR
mkdir $OUTDIR
nohup python run_qa.py \
--model_name_or_path vuiseng9/bert-base-uncased-squad \
--dataset_name squad \
--qat_checkpoint $MODELROOT/checkpoint-10750 \
--nncf_config $MODELROOT/nncf_bert_squad_qat.json \
--to_onnx $OUTDIR/bert-base-squadv1-qat-bt.onnx \
--do_eval \
--per_device_eval_batch_size 128 \
--max_seq_length 384 \
--doc_stride 128 \
--overwrite_output_dir \
--output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log &
```
|
vuiseng9/bert-base-uncased-squadv1-52.0-sparse | e833fd491943acad8e9ee5f59c4c589a049df950 | 2021-11-11T18:14:37.000Z | [
"pytorch",
"tf",
"bert",
"transformers"
] | null | false | vuiseng9 | null | vuiseng9/bert-base-uncased-squadv1-52.0-sparse | 1 | null | transformers | 30,467 | * A set of unstructured sparse bert-base-uncased models fine-tuned for SQuADv1.
* TensorFlow models are created using ```TFAutoModelForQuestionAnswering.from_pretrained(..., from_pt=True)``` and ```model.save_pretrained(tf_pth)``` (a conversion sketch follows the evaluation CLI below).
* Known issue: the PyTorch-to-TensorFlow conversion is lossy - evaluation results differ noticeably between the PyTorch and TensorFlow models.
* The table below was evaluated with HF's transformers v4.9.2. Sparsity is normalized over the dense layers in the attention heads and FFNN.
* Evaluation CLI:
```bash
python run_qa.py \
--model_name_or_path <model identifier> \
--dataset_name squad \
--do_eval \
--per_device_eval_batch_size 384 \
--max_seq_length 68 \
--doc_stride 26 \
--output_dir /tmp/eval-squad
```
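As noted in the bullets above, the TensorFlow checkpoints were exported from the PyTorch weights. A minimal conversion sketch (the output path is a placeholder) could look like:
```python
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

model_id = "vuiseng9/bert-base-uncased-squadv1-52.0-sparse"
tf_pth = "./tf-bert-base-uncased-squadv1-52.0-sparse"   # placeholder output directory

# Load the PyTorch checkpoint into a TF model, then save TF weights and tokenizer.
model = TFAutoModelForQuestionAnswering.from_pretrained(model_id, from_pt=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model.save_pretrained(tf_pth)
tokenizer.save_pretrained(tf_pth)
```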
| | HF Model Hub Identifier | sparsity | em (pytorch) | em (tf) | f1 (pytorch) | f1 (tf) |
|---:|:------------------------------------------------------------------------------------------------------------------------|-----------:|---------------:|----------:|---------------:|----------:|
| 0 | [vuiseng9/bert-base-uncased-squadv1-85.4-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-85.4-sparse) | 85.4 | 69.9338 | 14.2573 | 77.6861 | 23.4917 |
| 1 | [vuiseng9/bert-base-uncased-squadv1-72.9-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-72.9-sparse) | 72.9 | 74.6358 | 31.0596 | 82.2555 | 39.8446 |
| 2 | [vuiseng9/bert-base-uncased-squadv1-65.1-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-65.1-sparse) | 65.1 | 76.1306 | 43.0274 | 83.4117 | 51.4300 |
| 3 | [vuiseng9/bert-base-uncased-squadv1-59.6-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-59.6-sparse) | 59.6 | 76.8590 | 50.4920 | 84.1267 | 59.0881 |
| 4 | [vuiseng9/bert-base-uncased-squadv1-52.0-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-52.0-sparse) | 52.0 | 78.0038 | 54.2857 | 85.2000 | 62.2914 | |
vuiseng9/bert-base-uncased-squadv1-59.6-sparse | f3741942661b7e94d817a0bc6b5dc07b93569f87 | 2021-11-11T18:13:58.000Z | [
"pytorch",
"tf",
"bert",
"transformers"
] | null | false | vuiseng9 | null | vuiseng9/bert-base-uncased-squadv1-59.6-sparse | 1 | null | transformers | 30,468 | * A set of unstructured sparse bert-base-uncased models fine-tuned for SQuADv1.
* TensorFlow models are created using ```TFAutoModelForQuestionAnswering.from_pretrained(..., from_pt=True)``` and ```model.save_pretrained(tf_pth)```.
* Known issue: the PyTorch-to-TensorFlow conversion is lossy - evaluation results differ noticeably between the PyTorch and TensorFlow models.
* The table below was evaluated with HF's transformers v4.9.2. Sparsity is normalized over the dense layers in the attention heads and FFNN.
* Evaluation CLI:
```bash
python run_qa.py \
--model_name_or_path <model identifier> \
--dataset_name squad \
--do_eval \
--per_device_eval_batch_size 384 \
--max_seq_length 68 \
--doc_stride 26 \
--output_dir /tmp/eval-squad
```
| | HF Model Hub Identifier | sparsity | em (pytorch) | em (tf) | f1 (pytorch) | f1 (tf) |
|---:|:------------------------------------------------------------------------------------------------------------------------|-----------:|---------------:|----------:|---------------:|----------:|
| 0 | [vuiseng9/bert-base-uncased-squadv1-85.4-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-85.4-sparse) | 85.4 | 69.9338 | 14.2573 | 77.6861 | 23.4917 |
| 1 | [vuiseng9/bert-base-uncased-squadv1-72.9-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-72.9-sparse) | 72.9 | 74.6358 | 31.0596 | 82.2555 | 39.8446 |
| 2 | [vuiseng9/bert-base-uncased-squadv1-65.1-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-65.1-sparse) | 65.1 | 76.1306 | 43.0274 | 83.4117 | 51.4300 |
| 3 | [vuiseng9/bert-base-uncased-squadv1-59.6-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-59.6-sparse) | 59.6 | 76.8590 | 50.4920 | 84.1267 | 59.0881 |
| 4 | [vuiseng9/bert-base-uncased-squadv1-52.0-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-52.0-sparse) | 52.0 | 78.0038 | 54.2857 | 85.2000 | 62.2914 | |
vuiseng9/bert-base-uncased-squadv1-65.1-sparse | 101dba914e94ac48d3d5cfd4daa33ab633f85391 | 2021-11-11T18:13:39.000Z | [
"pytorch",
"tf",
"bert",
"transformers"
] | null | false | vuiseng9 | null | vuiseng9/bert-base-uncased-squadv1-65.1-sparse | 1 | null | transformers | 30,469 | * A set of unstructured sparse bert-base-uncased models fine-tuned for SQuADv1.
* TensorFlow models are created using ```TFAutoModelForQuestionAnswering.from_pretrained(..., from_pt=True)``` and ```model.save_pretrained(tf_pth)```.
* Known issue: the PyTorch-to-TensorFlow conversion is lossy - evaluation results differ noticeably between the PyTorch and TensorFlow models.
* The table below was evaluated with HF's transformers v4.9.2. Sparsity is normalized over the dense layers in the attention heads and FFNN.
* Evaluation CLI:
```bash
python run_qa.py \
--model_name_or_path <model identifier> \
--dataset_name squad \
--do_eval \
--per_device_eval_batch_size 384 \
--max_seq_length 68 \
--doc_stride 26 \
--output_dir /tmp/eval-squad
```
| | HF Model Hub Identifier | sparsity | em (pytorch) | em (tf) | f1 (pytorch) | f1 (tf) |
|---:|:------------------------------------------------------------------------------------------------------------------------|-----------:|---------------:|----------:|---------------:|----------:|
| 0 | [vuiseng9/bert-base-uncased-squadv1-85.4-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-85.4-sparse) | 85.4 | 69.9338 | 14.2573 | 77.6861 | 23.4917 |
| 1 | [vuiseng9/bert-base-uncased-squadv1-72.9-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-72.9-sparse) | 72.9 | 74.6358 | 31.0596 | 82.2555 | 39.8446 |
| 2 | [vuiseng9/bert-base-uncased-squadv1-65.1-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-65.1-sparse) | 65.1 | 76.1306 | 43.0274 | 83.4117 | 51.4300 |
| 3 | [vuiseng9/bert-base-uncased-squadv1-59.6-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-59.6-sparse) | 59.6 | 76.8590 | 50.4920 | 84.1267 | 59.0881 |
| 4 | [vuiseng9/bert-base-uncased-squadv1-52.0-sparse](https://huggingface.co/vuiseng9/bert-base-uncased-squadv1-52.0-sparse) | 52.0 | 78.0038 | 54.2857 | 85.2000 | 62.2914 | |
vuiseng9/pegasus-billsum | 4dc4997f8b8ce742fd3e51df1390c499a9548d68 | 2021-12-21T01:41:33.000Z | [
"pytorch",
"pegasus",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | vuiseng9 | null | vuiseng9/pegasus-billsum | 1 | null | transformers | 30,470 | This model is developed with transformers v4.13 with minor patch in this [fork](https://github.com/vuiseng9/transformers/tree/pegasus-v4p13).
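For quick inference, a hedged sketch using the standard summarization pipeline is shown below; the input text is a placeholder, and the generation settings mirror the eval flags used later in this card (256 target tokens, 8 beams).
```python
from transformers import pipeline

summarizer = pipeline("summarization", model="vuiseng9/pegasus-billsum")
text = "SECTION 1. SHORT TITLE. This Act may be cited as the ..."  # placeholder bill text
print(summarizer(text, max_length=256, num_beams=8)[0]["summary_text"])
```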
# Setup
```bash
git clone https://github.com/vuiseng9/transformers
cd transformers
git checkout pegasus-v4p13 && git reset --hard 41eeb07
# installation, set summarization dependency
# . . .
```
# Train
```bash
#!/usr/bin/env bash
export CUDA_VISIBLE_DEVICES=0,1,2,3
NEPOCH=10
RUNID=pegasus-billsum-${NEPOCH}eph-run1
OUTDIR=/data1/vchua/pegasus-hf4p13/pegasus/${RUNID}
mkdir -p $OUTDIR
nohup python run_summarization.py \
--model_name_or_path google/pegasus-large \
--dataset_name billsum \
--do_train \
--adafactor \
--learning_rate 2e-4 \
--label_smoothing_factor 0.1 \
--num_train_epochs $NEPOCH \
--per_device_train_batch_size 2 \
--do_eval \
--per_device_eval_batch_size 2 \
--num_beams 8 \
--max_source_length 1024 \
--max_target_length 256 \
--evaluation_strategy steps \
--eval_steps 1000 \
--save_strategy steps \
--save_steps 2000 \
--logging_steps 1 \
--overwrite_output_dir \
--run_name $RUNID \
--output_dir $OUTDIR > $OUTDIR/run.log 2>&1 &
```
# Eval
```bash
#!/usr/bin/env bash
export CUDA_VISIBLE_DEVICES=3
DT=$(date +%F_%H-%M)
RUNID=pegasus-billsum-${DT}
OUTDIR=/data1/vchua/pegasus-hf4p13/pegasus-test/${RUNID}
mkdir -p $OUTDIR
nohup python run_summarization.py \
--model_name_or_path vuiseng9/pegasus-billsum \
--dataset_name billsum \
--max_source_length 1024 \
--max_target_length 256 \
--do_predict \
--per_device_eval_batch_size 8 \
--predict_with_generate \
--num_beams 8 \
--overwrite_output_dir \
--run_name $RUNID \
--output_dir $OUTDIR > $OUTDIR/run.log 2>&1 &
```
Although fine-tuning is carried out for 10 epochs, this model is the checkpoint (@12000 steps, 6.6 epochs, 210 minutes) with the lowest eval loss during training. Test/predict with this checkpoint should give the results below.
```
***** predict metrics *****
predict_gen_len = 179.7363
predict_loss = 1.2452
predict_rouge1 = 56.8657
predict_rouge2 = 38.6531
predict_rougeL = 44.8399
predict_rougeLsum = 51.6266
predict_runtime = 1:19:28.20
predict_samples = 3269
predict_samples_per_second = 0.686
predict_steps_per_second = 0.086
``` |
vutankiet2901/wav2vec2-xls-r-1b-ja | 2c4a1631f81d4c13742d8b8250db5582e6accf7a | 2022-03-23T18:34:17.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"ja",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"common-voice",
"hf-asr-leaderboard",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | vutankiet2901 | null | vutankiet2901/wav2vec2-xls-r-1b-ja | 1 | null | transformers | 30,471 | ---
license: apache-2.0
language:
- ja
tags:
- automatic-speech-recognition
- common-voice
- hf-asr-leaderboard
- ja
- robust-speech-event
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-xls-r-1b
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 7.0
type: mozilla-foundation/common_voice_7_0
args: ja
metrics:
- name: Test WER (with LM)
type: wer
value: 11.77
- name: Test CER (with LM)
type: cer
value: 5.22
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8.0
type: mozilla-foundation/common_voice_8_0
args: ja
metrics:
- name: Test WER (with LM)
type: wer
value: 12.23
- name: Test CER (with LM)
type: cer
value: 5.33
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: ja
metrics:
- name: Test WER (with LM)
type: wer
value: 29.35
- name: Test CER (with LM)
type: cer
value: 16.43
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Test Data
type: speech-recognition-community-v2/eval_data
args: ja
metrics:
- name: Test CER
type: cer
value: 19.48
---
## Model description
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - JA dataset.
### Benchmark WER result:
| | [COMMON VOICE 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0) | [COMMON VOICE 8.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_8_0) |
|---|---|---|
|without LM| 16.97 | 17.95 |
|with 4-grams LM| 11.77 | 12.23|
### Benchmark CER result:
| | [COMMON VOICE 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0) | [COMMON VOICE 8.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_8_0) |
|---|---|---|
|without LM| 6.82 | 7.05 |
|with 4-grams LM| 5.22 | 5.33 |
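A minimal transcription sketch with the ASR pipeline is shown below; the audio path is a placeholder for a 16 kHz Japanese recording, and whether the 4-gram LM is applied depends on the decoder files shipped in the repo.
```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="vutankiet2901/wav2vec2-xls-r-1b-ja",
)
print(asr("sample.wav"))  # placeholder path to a 16 kHz Japanese audio file
```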
## Evaluation
Please use the eval.py file to run the evaluation:
```bash
pip install mecab-python3 unidic-lite pykakasi
python eval.py --model_id vutankiet2901/wav2vec2-xls-r-1b-ja --dataset mozilla-foundation/common_voice_8_0 --config ja --split test --log_outputs
```
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2000
- num_epochs: 100.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer | Cer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|
| 3.484 | 9.49 | 1500 | 1.1849 | 0.7543 | 0.4099 |
| 1.3582 | 18.98 | 3000 | 0.4320 | 0.3489 | 0.1591 |
| 1.1716 | 28.48 | 4500 | 0.3835 | 0.3175 | 0.1454 |
| 1.0951 | 37.97 | 6000 | 0.3732 | 0.3033 | 0.1405 |
| 1.04 | 47.47 | 7500 | 0.3485 | 0.2898 | 0.1360 |
| 0.9768 | 56.96 | 9000 | 0.3386 | 0.2787 | 0.1309 |
| 0.9129 | 66.45 | 10500 | 0.3363 | 0.2711 | 0.1272 |
| 0.8614 | 75.94 | 12000 | 0.3386 | 0.2676 | 0.1260 |
| 0.8092 | 85.44 | 13500 | 0.3356 | 0.2610 | 0.1240 |
| 0.7658 | 94.93 | 15000 | 0.3316 | 0.2564 | 0.1218 |
### Framework versions
- Transformers 4.16.0.dev0
- Pytorch 1.10.1+cu102
- Datasets 1.18.3
- Tokenizers 0.11.0
|
w11wo/javanese-distilbert-small-imdb | 969240eb0f9966c1b304aa8463b150571409a6fa | 2022-02-14T16:18:45.000Z | [
"pytorch",
"tf",
"distilbert",
"fill-mask",
"jv",
"dataset:w11wo/imdb-javanese",
"arxiv:1910.01108",
"transformers",
"javanese-distilbert-small-imdb",
"license:mit",
"autotrain_compatible"
] | fill-mask | false | w11wo | null | w11wo/javanese-distilbert-small-imdb | 1 | null | transformers | 30,472 | ---
language: jv
tags:
- javanese-distilbert-small-imdb
license: mit
datasets:
- w11wo/imdb-javanese
widget:
- text: "Film favoritku yaiku Interstellar [MASK] Christopher Nolan."
---
## Javanese DistilBERT Small IMDB
Javanese DistilBERT Small IMDB is a masked language model based on the [DistilBERT model](https://arxiv.org/abs/1910.01108). It was trained on Javanese IMDB movie reviews.
The model was originally the pretrained [Javanese DistilBERT Small model](https://huggingface.co/w11wo/javanese-distilbert-small) and is later fine-tuned on the Javanese IMDB movie review dataset. It achieved a perplexity of 21.01 on the validation dataset. Many of the techniques used are based on a Hugging Face tutorial [notebook](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling.ipynb) written by [Sylvain Gugger](https://github.com/sgugger).
Hugging Face's `Trainer` class from the [Transformers](https://huggingface.co/transformers) library was used to train the model. PyTorch was used as the backend framework during training, but the model remains compatible with TensorFlow nonetheless.
## Model
| Model | #params | Arch. | Training/Validation data (text) |
|----------------------------------|----------|----------------------|---------------------------------|
| `javanese-distilbert-small-imdb` | 66M | DistilBERT Small | Javanese IMDB (47.5 MB of text) |
## Evaluation Results
The model was trained for 5 epochs and the following is the final result once the training ended.
| train loss | valid loss | perplexity | total time |
|------------|------------|------------|-------------|
| 3.126 | 3.039 | 21.01 | 5:6:4 |
## How to Use
### As Masked Language Model
```python
from transformers import pipeline
pretrained_name = "w11wo/javanese-distilbert-small-imdb"
fill_mask = pipeline(
"fill-mask",
model=pretrained_name,
tokenizer=pretrained_name
)
fill_mask("Aku mangan sate ing [MASK] bareng konco-konco")
```
### Feature Extraction in PyTorch
```python
from transformers import DistilBertModel, DistilBertTokenizerFast
pretrained_name = "w11wo/javanese-distilbert-small-imdb"
model = DistilBertModel.from_pretrained(pretrained_name)
tokenizer = DistilBertTokenizerFast.from_pretrained(pretrained_name)
prompt = "Indonesia minangka negara gedhe."
encoded_input = tokenizer(prompt, return_tensors='pt')
output = model(**encoded_input)
```
## Disclaimer
Do consider the biases which came from the IMDB review that may be carried over into the results of this model.
## Author
Javanese DistilBERT Small was trained and evaluated by [Wilson Wongso](https://w11wo.github.io/). All computation and development are done on Google Colaboratory using their free GPU access.
## Citation
If you use any of our models in your research, please cite:
```bib
@inproceedings{wongso2021causal,
title={Causal and Masked Language Modeling of Javanese Language using Transformer-based Architectures},
author={Wongso, Wilson and Setiawan, David Samuel and Suhartono, Derwin},
booktitle={2021 International Conference on Advanced Computer Science and Information Systems (ICACSIS)},
pages={1--7},
year={2021},
organization={IEEE}
}
```
|
w11wo/javanese-distilbert-small | 5393d48a83e961b631c3d695cc84d54cd0903a2e | 2022-02-14T16:18:34.000Z | [
"pytorch",
"tf",
"distilbert",
"fill-mask",
"jv",
"dataset:wikipedia",
"arxiv:1910.01108",
"transformers",
"javanese-distilbert-small",
"license:mit",
"autotrain_compatible"
] | fill-mask | false | w11wo | null | w11wo/javanese-distilbert-small | 1 | null | transformers | 30,473 | ---
language: jv
tags:
- javanese-distilbert-small
license: mit
datasets:
- wikipedia
widget:
- text: "Joko [MASK] wis kelas siji SMA."
---
## Javanese DistilBERT Small
Javanese DistilBERT Small is a masked language model based on the [DistilBERT model](https://arxiv.org/abs/1910.01108). It was trained on the latest (late December 2020) Javanese Wikipedia articles.
The model was originally HuggingFace's pretrained [English DistilBERT model](https://huggingface.co/distilbert-base-uncased) and is later fine-tuned on the Javanese dataset. It achieved a perplexity of 23.54 on the validation dataset (20% of the articles). Many of the techniques used are based on a Hugging Face tutorial [notebook](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling.ipynb) written by [Sylvain Gugger](https://github.com/sgugger), and [fine-tuning tutorial notebook](https://github.com/piegu/fastai-projects/blob/master/finetuning-English-GPT2-any-language-Portuguese-HuggingFace-fastaiv2.ipynb) written by [Pierre Guillou](https://huggingface.co/pierreguillou).
Hugging Face's [Transformers](https://huggingface.co/transformers) library was used to train the model -- utilizing the base DistilBERT model and their `Trainer` class. PyTorch was used as the backend framework during training, but the model remains compatible with TensorFlow nonetheless.
## Model
| Model | #params | Arch. | Training/Validation data (text) |
|-----------------------------|---------|------------------|-------------------------------------|
| `javanese-distilbert-small` | 66M | DistilBERT Small | Javanese Wikipedia (319 MB of text) |
## Evaluation Results
The model was trained for 5 epochs and the following is the final result once the training ended.
| train loss | valid loss | perplexity | total time |
|------------|------------|------------|------------|
| 3.088 | 3.153 | 23.54 | 1:46:37 |
## How to Use
### As Masked Language Model
```python
from transformers import pipeline
pretrained_name = "w11wo/javanese-distilbert-small"
fill_mask = pipeline(
"fill-mask",
model=pretrained_name,
tokenizer=pretrained_name
)
fill_mask("Aku mangan sate ing [MASK] bareng konco-konco")
```
### Feature Extraction in PyTorch
```python
from transformers import DistilBertModel, DistilBertTokenizerFast
pretrained_name = "w11wo/javanese-distilbert-small"
model = DistilBertModel.from_pretrained(pretrained_name)
tokenizer = DistilBertTokenizerFast.from_pretrained(pretrained_name)
prompt = "Indonesia minangka negara gedhe."
encoded_input = tokenizer(prompt, return_tensors='pt')
output = model(**encoded_input)
```
## Disclaimer
Do remember that although the dataset originated from Wikipedia, the model may not always generate factual texts. Additionally, the biases which came from the Wikipedia articles may be carried over into the results of this model.
## Author
Javanese DistilBERT Small was trained and evaluated by [Wilson Wongso](https://w11wo.github.io/). All computation and development are done on Google Colaboratory using their free GPU access.
## Citation
If you use any of our models in your research, please cite:
```bib
@inproceedings{wongso2021causal,
title={Causal and Masked Language Modeling of Javanese Language using Transformer-based Architectures},
author={Wongso, Wilson and Setiawan, David Samuel and Suhartono, Derwin},
booktitle={2021 International Conference on Advanced Computer Science and Information Systems (ICACSIS)},
pages={1--7},
year={2021},
organization={IEEE}
}
```
|
w11wo/javanese-roberta-small | 785f5a75ba398e554c999376da26e5b1ae978b3b | 2022-02-14T16:17:41.000Z | [
"pytorch",
"tf",
"jax",
"roberta",
"fill-mask",
"jv",
"dataset:wikipedia",
"arxiv:1907.11692",
"transformers",
"javanese-roberta-small",
"license:mit",
"autotrain_compatible"
] | fill-mask | false | w11wo | null | w11wo/javanese-roberta-small | 1 | null | transformers | 30,474 | ---
language: jv
tags:
- javanese-roberta-small
license: mit
datasets:
- wikipedia
widget:
- text: "Ing mangsa rendheng awakedhewe kudu pinter njaga <mask>."
---
## Javanese RoBERTa Small
Javanese RoBERTa Small is a masked language model based on the [RoBERTa model](https://arxiv.org/abs/1907.11692). It was trained on the latest (late December 2020) Javanese Wikipedia articles.
The model was originally HuggingFace's pretrained [English RoBERTa model](https://huggingface.co/roberta-base) and is later fine-tuned on the Javanese dataset. It achieved a perplexity of 33.30 on the validation dataset (20% of the articles). Many of the techniques used are based on a Hugging Face tutorial [notebook](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling.ipynb) written by [Sylvain Gugger](https://github.com/sgugger), and [fine-tuning tutorial notebook](https://github.com/piegu/fastai-projects/blob/master/finetuning-English-GPT2-any-language-Portuguese-HuggingFace-fastaiv2.ipynb) written by [Pierre Guillou](https://huggingface.co/pierreguillou).
Hugging Face's [Transformers](https://huggingface.co/transformers) library was used to train the model -- utilizing the base RoBERTa model and their `Trainer` class. PyTorch was used as the backend framework during training, but the model remains compatible with TensorFlow nonetheless.
## Model
| Model | #params | Arch. | Training/Validation data (text) |
|--------------------------|---------|----------|-------------------------------------|
| `javanese-roberta-small` | 124M | RoBERTa | Javanese Wikipedia (319 MB of text) |
## Evaluation Results
The model was trained for 5 epochs and the following is the final result once the training ended.
| train loss | valid loss | perplexity | total time |
|------------|------------|------------|------------|
| 3.481 | 3.506 | 33.30 | 1:11:43 |
## How to Use
### As Masked Language Model
```python
from transformers import pipeline
pretrained_name = "w11wo/javanese-roberta-small"
fill_mask = pipeline(
"fill-mask",
model=pretrained_name,
tokenizer=pretrained_name
)
fill_mask("Meja lan kursine lagi <mask>.")
```
### Feature Extraction in PyTorch
```python
from transformers import RobertaModel, RobertaTokenizerFast
pretrained_name = "w11wo/javanese-roberta-small"
model = RobertaModel.from_pretrained(pretrained_name)
tokenizer = RobertaTokenizerFast.from_pretrained(pretrained_name)
prompt = "Indonesia minangka negara gedhe."
encoded_input = tokenizer(prompt, return_tensors='pt')
output = model(**encoded_input)
```
## Disclaimer
Do remember that although the dataset originated from Wikipedia, the model may not always generate factual texts. Additionally, the biases which came from the Wikipedia articles may be carried over into the results of this model.
## Author
Javanese RoBERTa Small was trained and evaluated by [Wilson Wongso](https://w11wo.github.io/). All computation and development are done on Google Colaboratory using their free GPU access.
## Citation
If you use any of our models in your research, please cite:
```bib
@inproceedings{wongso2021causal,
title={Causal and Masked Language Modeling of Javanese Language using Transformer-based Architectures},
author={Wongso, Wilson and Setiawan, David Samuel and Suhartono, Derwin},
booktitle={2021 International Conference on Advanced Computer Science and Information Systems (ICACSIS)},
pages={1--7},
year={2021},
organization={IEEE}
}
``` |
w11wo/lao-roberta-base | 4eb8f4edd99d33a178c00f5b04f615205e18031d | 2021-12-05T15:55:09.000Z | [
"pytorch",
"tensorboard",
"roberta",
"fill-mask",
"lo",
"dataset:oscar-corpus/OSCAR-2109",
"arxiv:1907.11692",
"transformers",
"lao-roberta-base",
"license:mit",
"autotrain_compatible"
] | fill-mask | false | w11wo | null | w11wo/lao-roberta-base | 1 | 1 | transformers | 30,475 | ---
language: lo
tags:
- lao-roberta-base
license: mit
datasets:
- oscar-corpus/OSCAR-2109
---
## Lao RoBERTa Base
Lao RoBERTa Base is a masked language model based on the [RoBERTa](https://arxiv.org/abs/1907.11692) model. It was trained on the [OSCAR-2109](https://huggingface.co/datasets/oscar-corpus/OSCAR-2109) dataset, specifically the `deduplicated_lo` subset. The model was trained from scratch and achieved an evaluation loss of 1.4556 and an evaluation perplexity of 4.287.
This model was trained using HuggingFace's PyTorch framework and the training script found [here](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_mlm.py). All training was done on a TPUv3-8, provided by the [TPU Research Cloud](https://sites.research.google/trc/about/) program. You can view the detailed training results in the [Training metrics](https://huggingface.co/w11wo/lao-roberta-base/tensorboard) tab, logged via Tensorboard.
## Model
| Model | #params | Arch. | Training/Validation data (text) |
| ------------------ | ------- | ------- | ------------------------------------ |
| `lao-roberta-base` | 124M | RoBERTa | OSCAR-2109 `deduplicated_lo` Dataset |
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- distributed_type: tpu
- num_devices: 8
- total_train_batch_size: 1024
- total_eval_batch_size: 1024
- optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
| :-----------: | :---: | :--: | :-------------: |
| No log | 1.0 | 216 | 5.8586 |
| No log | 2.0 | 432 | 5.5095 |
| 6.688 | 3.0 | 648 | 5.3976 |
| 6.688 | 4.0 | 864 | 5.3562 |
| 5.3629 | 5.0 | 1080 | 5.2912 |
| 5.3629 | 6.0 | 1296 | 5.2385 |
| 5.22 | 7.0 | 1512 | 5.1955 |
| 5.22 | 8.0 | 1728 | 5.1785 |
| 5.22 | 9.0 | 1944 | 5.1327 |
| 5.1248 | 10.0 | 2160 | 5.1243 |
| 5.1248 | 11.0 | 2376 | 5.0889 |
| 5.0591 | 12.0 | 2592 | 5.0732 |
| 5.0591 | 13.0 | 2808 | 5.0417 |
| 5.0094 | 14.0 | 3024 | 5.0388 |
| 5.0094 | 15.0 | 3240 | 4.9299 |
| 5.0094 | 16.0 | 3456 | 4.2991 |
| 4.7527 | 17.0 | 3672 | 3.6541 |
| 4.7527 | 18.0 | 3888 | 2.7826 |
| 3.4431 | 19.0 | 4104 | 2.2796 |
| 3.4431 | 20.0 | 4320 | 2.0213 |
| 2.2803 | 21.0 | 4536 | 1.8809 |
| 2.2803 | 22.0 | 4752 | 1.7615 |
| 2.2803 | 23.0 | 4968 | 1.6925 |
| 1.8601 | 24.0 | 5184 | 1.6205 |
| 1.8601 | 25.0 | 5400 | 1.5751 |
| 1.6697 | 26.0 | 5616 | 1.5391 |
| 1.6697 | 27.0 | 5832 | 1.5200 |
| 1.5655 | 28.0 | 6048 | 1.4866 |
| 1.5655 | 29.0 | 6264 | 1.4656 |
| 1.5655 | 30.0 | 6480 | 1.4627 |
## How to Use
### As Masked Language Model
```python
from transformers import pipeline
pretrained_name = "w11wo/lao-roberta-base"
prompt = "REPLACE WITH MASKED PROMPT"
fill_mask = pipeline(
"fill-mask",
model=pretrained_name,
tokenizer=pretrained_name
)
fill_mask(prompt)
```
### Feature Extraction in PyTorch
```python
from transformers import RobertaModel, RobertaTokenizerFast
pretrained_name = "w11wo/lao-roberta-base"
model = RobertaModel.from_pretrained(pretrained_name)
tokenizer = RobertaTokenizerFast.from_pretrained(pretrained_name)
prompt = "ສະບາຍດີຊາວໂລກ."
encoded_input = tokenizer(prompt, return_tensors='pt')
output = model(**encoded_input)
```
## Disclaimer
Do consider the biases which came from pre-training datasets that may be carried over into the results of this model.
## Author
Lao RoBERTa Base was trained and evaluated by [Wilson Wongso](https://w11wo.github.io/). All computation and development are done on Google's TPU-RC.
## Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.9.0+cu102
- Datasets 1.16.1
- Tokenizers 0.10.3
|
wangj2/domaingen | 0866327f5c582d4b73604ecf450b75789714f38b | 2021-05-23T13:46:02.000Z | [
"pytorch",
"jax",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | wangj2 | null | wangj2/domaingen | 1 | null | transformers | 30,476 | Entry not found |
wangst/dummy-model | 200585467ec0f0df8c25a56cf2d2746e68475517 | 2021-11-19T18:24:06.000Z | [
"pytorch",
"camembert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | wangst | null | wangst/dummy-model | 1 | null | transformers | 30,477 | Entry not found |
wbmitcast/mymodel005 | e4c8b2d684912f6c4c8175e08084c44bc4388b6b | 2021-10-29T02:23:30.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | wbmitcast | null | wbmitcast/mymodel005 | 1 | null | transformers | 30,478 | Entry not found |
wbmitcast/mymodel007 | 2ac7bca70e71a205a903ba2524198f3c504986de | 2021-11-02T03:52:20.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | wbmitcast | null | wbmitcast/mymodel007 | 1 | null | transformers | 30,479 | Entry not found |
weixyan/codegpt_py150 | cf0e250d9efc3b2dfd98f5fdead8b53c4f5fc06c | 2021-08-30T09:26:16.000Z | [
"pytorch"
] | null | false | weixyan | null | weixyan/codegpt_py150 | 1 | null | null | 30,480 | Entry not found |
wesam266/px | b46ec223cef7737ff22b8a50ea4b02326c24c5b4 | 2022-01-23T09:47:46.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | wesam266 | null | wesam266/px | 1 | null | transformers | 30,481 | Entry not found |
widyanto/IndoT5-small-qg | 09f73fd84e15c75fe0a02bc42369a6c627289c9d | 2021-08-24T00:53:22.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | widyanto | null | widyanto/IndoT5-small-qg | 1 | null | transformers | 30,482 | Entry not found |
willemjan/eng | 8a4584a6b68b2dfe4687341e2e19d22da1c97dc1 | 2022-02-07T09:23:20.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"license:cc-by-nc-sa-3.0",
"autotrain_compatible"
] | fill-mask | false | willemjan | null | willemjan/eng | 1 | null | transformers | 30,483 | ---
license: cc-by-nc-sa-3.0
---
|
willemjan/nl2 | 706c0f3cc856fea25fd865d248e162c4235a32a8 | 2022-02-07T08:52:58.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"license:cc-by-nc-3.0",
"autotrain_compatible"
] | fill-mask | false | willemjan | null | willemjan/nl2 | 1 | null | transformers | 30,484 | ---
license: cc-by-nc-3.0
---
|
wilsontam/bert-base-uncased-dstc9 | 0c61b9deb8374ce9ec5aaa7acd346a44aa57fbe9 | 2021-12-26T14:00:21.000Z | [
"pytorch",
"bert",
"fill-mask",
"en",
"transformers",
"dstc10",
"autotrain_compatible"
] | fill-mask | false | wilsontam | null | wilsontam/bert-base-uncased-dstc9 | 1 | null | transformers | 30,485 | ---
language: "en"
tags:
- dstc10
widget:
- text: "Can you accommodate large [MASK] ?"
---
# Goal
This BERT model is trained on the DSTC9 training + validation data for dialogue-modeling purposes.
Data link: https://github.com/alexa/alexa-with-dstc9-track1-dataset
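A minimal fill-mask sketch, reusing the widget prompt from the metadata above:
```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="wilsontam/bert-base-uncased-dstc9")
print(fill_mask("Can you accommodate large [MASK] ?"))
```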
Credit: Shuhan Yuan, Wilson Tam |
wjc123/dobule_klue | 0e54ae0af8b929ccfcb2badf3b698d5bfca141b8 | 2021-12-06T10:54:19.000Z | [
"pytorch",
"encoder-decoder",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | wjc123 | null | wjc123/dobule_klue | 1 | null | transformers | 30,486 | Entry not found |
wjc123/double_klue2 | 6c57b314b71e7fc270e026106399dfb777e32881 | 2021-12-07T13:59:14.000Z | [
"pytorch",
"encoder-decoder",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | wjc123 | null | wjc123/double_klue2 | 1 | null | transformers | 30,487 | Entry not found |
wtrClover/DialoGPT-small-Flutterbot | 334497bc5ce9c3f042729cc9c84e7dcd26c6be7f | 2022-01-27T23:38:14.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | wtrClover | null | wtrClover/DialoGPT-small-Flutterbot | 1 | null | transformers | 30,488 | ---
tags:
- conversational
---
# MLP DialoGPT Model based on Fluttershy |
wudi7758521521/kaikai_model2 | 3f2a899e125eec4ee285ef1b671bb77a2e24345e | 2021-07-18T02:54:51.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | wudi7758521521 | null | wudi7758521521/kaikai_model2 | 1 | null | transformers | 30,489 | Entry not found |
wuyanzu/2022_02_10 | 313b59e3dc4fa1c3e64ac4945eeff8ceed18f327 | 2022-02-10T13:32:43.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | wuyanzu | null | wuyanzu/2022_02_10 | 1 | null | transformers | 30,490 | Entry not found |
x10ng/gpt2-wikitext2 | 61b0fcc7a781902770daff05ca4e47aba961a2c1 | 2022-01-10T16:00:26.000Z | [
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | x10ng | null | x10ng/gpt2-wikitext2 | 1 | null | transformers | 30,491 | Entry not found |
xdwang/tmp | 86f254b76334b658219d6219c11594ecc07540a8 | 2022-02-07T04:27:52.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | xdwang | null | xdwang/tmp | 1 | null | transformers | 30,492 | Entry not found |
McGill-NLP/electra-medal | 870a3ddd48d0f2e48fd7f45e066993736981e7a7 | 2020-11-16T18:44:46.000Z | [
"pytorch",
"tf",
"electra",
"feature-extraction",
"transformers"
] | feature-extraction | false | McGill-NLP | null | McGill-NLP/electra-medal | 1 | null | transformers | 30,493 | Entry not found |
xhluca/tapas-nq-hn-retriever-large-0 | 7e5da2904f8e71fc476fbb6deeac4386eb998ee8 | 2022-02-10T03:39:57.000Z | [
"pytorch",
"tapas",
"feature-extraction",
"transformers"
] | feature-extraction | false | xhluca | null | xhluca/tapas-nq-hn-retriever-large-0 | 1 | null | transformers | 30,494 | Entry not found |
xhluca/tapas-nq-hn-retriever-large-1 | 32d9c42cec8984c3158561c8dabbd0132827bfe7 | 2022-02-10T03:40:23.000Z | [
"pytorch",
"tapas",
"feature-extraction",
"transformers"
] | feature-extraction | false | xhluca | null | xhluca/tapas-nq-hn-retriever-large-1 | 1 | null | transformers | 30,495 | Entry not found |
xhluca/tapas-nq-hn-retriever-medium-0 | bbdf66f383fc082b145b62955e4a467eb662894b | 2022-02-10T02:48:54.000Z | [
"pytorch",
"tapas",
"feature-extraction",
"transformers"
] | feature-extraction | false | xhluca | null | xhluca/tapas-nq-hn-retriever-medium-0 | 1 | null | transformers | 30,496 | Entry not found |
xhyi/PT_GPTNEO125_ATG | 990b3a764568de56cb4153362cdd17f60463b7b2 | 2021-09-02T18:03:50.000Z | [
"pytorch",
"gpt_neo",
"text-generation",
"transformers"
] | text-generation | false | xhyi | null | xhyi/PT_GPTNEO125_ATG | 1 | null | transformers | 30,497 | Entry not found |
xhyi/distilLED3_08_31_2021_v5 | f02048cf4ccf3de7536dddd07bdbfeb81614823d | 2021-09-02T01:44:58.000Z | [
"pytorch",
"led",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | xhyi | null | xhyi/distilLED3_08_31_2021_v5 | 1 | null | transformers | 30,498 | \nTraining Loss Validation Loss Rouge2 Precision Rouge2 Recall Rouge2 Fmeasure
2.880900 2.715085 0.121400 0.142300 0.117100
+200 steps
total = 440 steps
tokenization:
max article: 8192
max abstract: 512 |
xiejiafang/bert_finetuning_test | c8ec728d13f32673807abac48f1717b9f544fb36 | 2021-07-18T02:37:10.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | xiejiafang | null | xiejiafang/bert_finetuning_test | 1 | null | transformers | 30,499 | Entry not found |