modelId | sha | lastModified | tags | pipeline_tag | private | author | config | id | downloads | likes | library_name | __index_level_0__ | readme |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
DeskDown/MarianMixFT_en-ms | 485eace2ea9655bcd97f3f79c08c88dbdc17741f | 2022-01-15T00:24:58.000Z | [
"pytorch",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DeskDown | null | DeskDown/MarianMixFT_en-ms | 2 | null | transformers | 23,000 | Entry not found |
DeskDown/MarianMixFT_en-th | 90cbdbc06354fcd01290e3557fa5750f20e3d8cb | 2022-01-14T19:34:06.000Z | [
"pytorch",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DeskDown | null | DeskDown/MarianMixFT_en-th | 2 | null | transformers | 23,001 | Entry not found |
DeskDown/MarianMix_en-zh_to_vi-ms-hi-ja | c804ffb8de64463293b59fee223400e3c47ff5f5 | 2022-01-12T14:11:06.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DeskDown | null | DeskDown/MarianMix_en-zh_to_vi-ms-hi-ja | 2 | null | transformers | 23,002 | Entry not found |
Dilmk2/DialoGPT-small-harrypotter | 3c5157e1bf282c58ce24939e9b15f290a030d04a | 2021-08-26T16:56:13.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Dilmk2 | null | Dilmk2/DialoGPT-small-harrypotter | 2 | null | transformers | 23,003 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
DimaOrekhov/transformer-method-name | b195b4ac1d539ed231364c9f5884c83674029a4b | 2020-12-28T00:39:31.000Z | [
"pytorch",
"encoder-decoder",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DimaOrekhov | null | DimaOrekhov/transformer-method-name | 2 | null | transformers | 23,004 | Entry not found |
Dongmin/testmodel | 9f7173e20ce3e4751cb6a8a7b74a25851cba9a40 | 2021-09-10T08:34:55.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Dongmin | null | Dongmin/testmodel | 2 | 1 | transformers | 23,005 | Entry not found |
Doogie/Wayne_NLP_mT5 | a645ba0f569b5b20de6aa220407335f4a87a0efb | 2022-03-24T02:02:30.000Z | [
"pytorch",
"tensorboard",
"mt5",
"text2text-generation",
"dataset:cnn_dailymail",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | Doogie | null | Doogie/Wayne_NLP_mT5 | 2 | null | transformers | 23,006 | ---
tags:
- generated_from_trainer
datasets:
- cnn_dailymail
model-index:
- name: Wayne_NLP_mT5
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Wayne_NLP_mT5
This model was trained on English datasets only.
If you want a model trained on Korean + English data, see wayne_mulang_mT5.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 10
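For orientation, these settings correspond roughly to the following `Seq2SeqTrainingArguments` configuration (a sketch for illustration, not the original training script; the output directory name is assumed):

```python
from transformers import Seq2SeqTrainingArguments

# Sketch of the reported hyperparameters; model and dataset wiring is omitted.
training_args = Seq2SeqTrainingArguments(
    output_dir="Wayne_NLP_mT5",       # assumed output directory
    learning_rate=2e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=10,
)
```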
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0a0+3fd9dcf
- Datasets 1.18.3
- Tokenizers 0.11.0
|
Doogie/ke-t5-base-ko-AIHub-paper-summary | 979c9f32bbbfcecf7ce5bf1d47770bd83f6e6f09 | 2021-12-27T08:03:16.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Doogie | null | Doogie/ke-t5-base-ko-AIHub-paper-summary | 2 | null | transformers | 23,007 | Entry not found |
Doohae/roberta | 9e73137a4806394808c735e018370549f7822e86 | 2021-12-03T05:29:34.000Z | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Doohae | null | Doohae/roberta | 2 | null | transformers | 23,008 | Model for extraction-based MRC.
Original model: klue/roberta-large.
Designed for the ODQA competition. |
Dragoniod1596/DialoGPT-small-Legacies | 0e570380ff3d0d8d64261b2078bf595d4167bf12 | 2021-10-15T13:13:44.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Dragoniod1596 | null | Dragoniod1596/DialoGPT-small-Legacies | 2 | null | transformers | 23,009 | ---
tags:
- conversational
---
# Legacies DialoGPT Model |
DrishtiSharma/wav2vec2-large-xls-r-300m-br-d10 | 2f55395d73f0d69791472aa8b7e7437c5fa17819 | 2022-03-24T11:56:43.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"br",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"generated_from_trainer",
"robust-speech-event",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-br-d10 | 2 | null | transformers | 23,010 | ---
language:
- br
license: apache-2.0
tags:
- generated_from_trainer
- robust-speech-event
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
metrics:
- wer
model-index:
- name: wav2vec2-large-xls-r-300m-br-d10
results:
- task:
type: automatic-speech-recognition
name: Speech Recognition
dataset:
type: mozilla-foundation/common_voice_8_0
name: Common Voice 8
args: br
metrics:
- type: wer
value: 0.5230357484228637
name: Test WER
- name: Test CER
type: cer
value: 0.1880661144228536
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: br
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-br-d10
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - BR dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1382
- Wer: 0.4895
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-br-d10 --dataset mozilla-foundation/common_voice_8_0 --config br --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Breton language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0004
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 800
- num_epochs: 50
- mixed_precision_training: Native AMP
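Note that the effective batch size of 32 comes from 16 samples per device times 2 gradient-accumulation steps. In `TrainingArguments` terms the reported setup looks roughly like this (a sketch, not the original training script; the output directory name is assumed):

```python
from transformers import TrainingArguments

# Sketch of the reported hyperparameters; 16 x 2 accumulation steps = effective batch of 32.
training_args = TrainingArguments(
    output_dir="wav2vec2-large-xls-r-300m-br-d10",  # assumed output directory
    learning_rate=4e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=800,
    num_train_epochs=50,
    fp16=True,  # "Native AMP" mixed precision; requires a CUDA device
)
```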
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 13.611 | 0.68 | 100 | 5.8492 | 1.0 |
| 3.8176 | 1.35 | 200 | 3.2181 | 1.0 |
| 3.0457 | 2.03 | 300 | 3.0902 | 1.0 |
| 2.2632 | 2.7 | 400 | 1.4882 | 0.9426 |
| 1.1965 | 3.38 | 500 | 1.1396 | 0.7950 |
| 0.984 | 4.05 | 600 | 1.0216 | 0.7583 |
| 0.8036 | 4.73 | 700 | 1.0258 | 0.7202 |
| 0.7061 | 5.41 | 800 | 0.9710 | 0.6820 |
| 0.689 | 6.08 | 900 | 0.9731 | 0.6488 |
| 0.6063 | 6.76 | 1000 | 0.9442 | 0.6569 |
| 0.5215 | 7.43 | 1100 | 1.0221 | 0.6671 |
| 0.4965 | 8.11 | 1200 | 0.9266 | 0.6181 |
| 0.4321 | 8.78 | 1300 | 0.9050 | 0.5991 |
| 0.3762 | 9.46 | 1400 | 0.9801 | 0.6134 |
| 0.3747 | 10.14 | 1500 | 0.9210 | 0.5747 |
| 0.3554 | 10.81 | 1600 | 0.9720 | 0.6051 |
| 0.3148 | 11.49 | 1700 | 0.9672 | 0.6099 |
| 0.3176 | 12.16 | 1800 | 1.0120 | 0.5966 |
| 0.2915 | 12.84 | 1900 | 0.9490 | 0.5653 |
| 0.2696 | 13.51 | 2000 | 0.9394 | 0.5819 |
| 0.2569 | 14.19 | 2100 | 1.0197 | 0.5667 |
| 0.2395 | 14.86 | 2200 | 0.9771 | 0.5608 |
| 0.2367 | 15.54 | 2300 | 1.0516 | 0.5678 |
| 0.2153 | 16.22 | 2400 | 1.0097 | 0.5679 |
| 0.2092 | 16.89 | 2500 | 1.0143 | 0.5430 |
| 0.2046 | 17.57 | 2600 | 1.0884 | 0.5631 |
| 0.1937 | 18.24 | 2700 | 1.0113 | 0.5648 |
| 0.1752 | 18.92 | 2800 | 1.0056 | 0.5470 |
| 0.164 | 19.59 | 2900 | 1.0340 | 0.5508 |
| 0.1723 | 20.27 | 3000 | 1.0743 | 0.5615 |
| 0.1535 | 20.95 | 3100 | 1.0495 | 0.5465 |
| 0.1432 | 21.62 | 3200 | 1.0390 | 0.5333 |
| 0.1561 | 22.3 | 3300 | 1.0798 | 0.5590 |
| 0.1384 | 22.97 | 3400 | 1.1716 | 0.5449 |
| 0.1359 | 23.65 | 3500 | 1.1154 | 0.5420 |
| 0.1356 | 24.32 | 3600 | 1.0883 | 0.5387 |
| 0.1355 | 25.0 | 3700 | 1.1114 | 0.5504 |
| 0.1158 | 25.68 | 3800 | 1.1171 | 0.5388 |
| 0.1166 | 26.35 | 3900 | 1.1335 | 0.5403 |
| 0.1165 | 27.03 | 4000 | 1.1374 | 0.5248 |
| 0.1064 | 27.7 | 4100 | 1.0336 | 0.5298 |
| 0.0987 | 28.38 | 4200 | 1.0407 | 0.5216 |
| 0.104 | 29.05 | 4300 | 1.1012 | 0.5350 |
| 0.0894 | 29.73 | 4400 | 1.1016 | 0.5310 |
| 0.0912 | 30.41 | 4500 | 1.1383 | 0.5302 |
| 0.0972 | 31.08 | 4600 | 1.0851 | 0.5214 |
| 0.0832 | 31.76 | 4700 | 1.1705 | 0.5311 |
| 0.0859 | 32.43 | 4800 | 1.0750 | 0.5192 |
| 0.0811 | 33.11 | 4900 | 1.0900 | 0.5180 |
| 0.0825 | 33.78 | 5000 | 1.1271 | 0.5196 |
| 0.07 | 34.46 | 5100 | 1.1289 | 0.5141 |
| 0.0689 | 35.14 | 5200 | 1.0960 | 0.5101 |
| 0.068 | 35.81 | 5300 | 1.1377 | 0.5050 |
| 0.0776 | 36.49 | 5400 | 1.0880 | 0.5194 |
| 0.0642 | 37.16 | 5500 | 1.1027 | 0.5076 |
| 0.0607 | 37.84 | 5600 | 1.1293 | 0.5119 |
| 0.0607 | 38.51 | 5700 | 1.1229 | 0.5103 |
| 0.0545 | 39.19 | 5800 | 1.1168 | 0.5103 |
| 0.0562 | 39.86 | 5900 | 1.1206 | 0.5073 |
| 0.0484 | 40.54 | 6000 | 1.1710 | 0.5019 |
| 0.0499 | 41.22 | 6100 | 1.1511 | 0.5100 |
| 0.0455 | 41.89 | 6200 | 1.1488 | 0.5009 |
| 0.0475 | 42.57 | 6300 | 1.1196 | 0.4944 |
| 0.0413 | 43.24 | 6400 | 1.1654 | 0.4996 |
| 0.0389 | 43.92 | 6500 | 1.0961 | 0.4930 |
| 0.0428 | 44.59 | 6600 | 1.0955 | 0.4938 |
| 0.039 | 45.27 | 6700 | 1.1323 | 0.4955 |
| 0.0352 | 45.95 | 6800 | 1.1040 | 0.4930 |
| 0.0334 | 46.62 | 6900 | 1.1382 | 0.4942 |
| 0.0338 | 47.3 | 7000 | 1.1264 | 0.4911 |
| 0.0307 | 47.97 | 7100 | 1.1216 | 0.4881 |
| 0.0286 | 48.65 | 7200 | 1.1459 | 0.4894 |
| 0.0348 | 49.32 | 7300 | 1.1419 | 0.4906 |
| 0.0329 | 50.0 | 7400 | 1.1382 | 0.4895 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-hi-d3 | 625e2dd7896d173b712df159cf6a8b4ff949ed94 | 2022-03-24T11:52:54.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"hi",
"dataset:mozilla-foundation/common_voice_7_0",
"transformers",
"mozilla-foundation/common_voice_7_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-hi-d3 | 2 | null | transformers | 23,011 | ---
language:
- hi
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_7_0
- generated_from_trainer
- hi
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_7_0
model-index:
- name: wav2vec2-large-xls-r-300m-hi-d3
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 7
type: mozilla-foundation/common_voice_7_0
args: vot
metrics:
- name: Test WER
type: wer
value: 0.4204111781361566
- name: Test CER
type: cer
value: 0.13869169624556316
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: hi
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 7.0
type: mozilla-foundation/common_voice_7_0
args: hi
metrics:
- name: Test WER
type: wer
value: 42.04
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-hi-d3
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - HI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7988
- Wer: 0.3713
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_7_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-hi-d3 --dataset mozilla-foundation/common_voice_7_0 --config hi --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Hindi language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.000388
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 750
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 8.2826 | 1.36 | 200 | 3.5253 | 1.0 |
| 2.7019 | 2.72 | 400 | 1.1744 | 0.7360 |
| 0.7358 | 4.08 | 600 | 0.7781 | 0.5501 |
| 0.4942 | 5.44 | 800 | 0.7590 | 0.5345 |
| 0.4056 | 6.8 | 1000 | 0.6885 | 0.4776 |
| 0.3243 | 8.16 | 1200 | 0.7195 | 0.4861 |
| 0.2785 | 9.52 | 1400 | 0.7473 | 0.4930 |
| 0.2448 | 10.88 | 1600 | 0.7201 | 0.4574 |
| 0.2155 | 12.24 | 1800 | 0.7686 | 0.4648 |
| 0.2039 | 13.6 | 2000 | 0.7440 | 0.4624 |
| 0.1792 | 14.96 | 2200 | 0.7815 | 0.4658 |
| 0.1695 | 16.33 | 2400 | 0.7678 | 0.4557 |
| 0.1598 | 17.68 | 2600 | 0.7468 | 0.4393 |
| 0.1568 | 19.05 | 2800 | 0.7440 | 0.4422 |
| 0.1391 | 20.41 | 3000 | 0.7656 | 0.4317 |
| 0.1283 | 21.77 | 3200 | 0.7892 | 0.4299 |
| 0.1194 | 23.13 | 3400 | 0.7646 | 0.4192 |
| 0.1116 | 24.49 | 3600 | 0.8156 | 0.4330 |
| 0.1111 | 25.85 | 3800 | 0.7661 | 0.4322 |
| 0.1023 | 27.21 | 4000 | 0.7419 | 0.4276 |
| 0.1007 | 28.57 | 4200 | 0.8488 | 0.4245 |
| 0.0925 | 29.93 | 4400 | 0.8062 | 0.4070 |
| 0.0918 | 31.29 | 4600 | 0.8412 | 0.4218 |
| 0.0813 | 32.65 | 4800 | 0.8045 | 0.4087 |
| 0.0805 | 34.01 | 5000 | 0.8411 | 0.4113 |
| 0.0774 | 35.37 | 5200 | 0.7664 | 0.3943 |
| 0.0666 | 36.73 | 5400 | 0.8082 | 0.3939 |
| 0.0655 | 38.09 | 5600 | 0.7948 | 0.4000 |
| 0.0617 | 39.45 | 5800 | 0.8084 | 0.3932 |
| 0.0606 | 40.81 | 6000 | 0.8223 | 0.3841 |
| 0.0569 | 42.18 | 6200 | 0.7892 | 0.3832 |
| 0.0544 | 43.54 | 6400 | 0.8326 | 0.3834 |
| 0.0508 | 44.89 | 6600 | 0.7952 | 0.3774 |
| 0.0492 | 46.26 | 6800 | 0.7923 | 0.3756 |
| 0.0459 | 47.62 | 7000 | 0.7925 | 0.3701 |
| 0.0423 | 48.98 | 7200 | 0.7988 | 0.3713 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-hsb-v1 | f121e9452644f27af97d2a3ab286030ac47b5c57 | 2022-03-24T11:56:45.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"hsb",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-hsb-v1 | 2 | null | transformers | 23,012 | ---
language:
- hsb
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- hsb
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-hsb-v1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: hsb
metrics:
- name: Test WER
type: wer
value: 0.4393
- name: Test CER
type: cer
value: 0.1036
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: hsb
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-hsb-v1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - HSB dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5684
- Wer: 0.4402
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-hsb-v1 --dataset mozilla-foundation/common_voice_8_0 --config hsb --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Upper Sorbian language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.00045
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 8.972 | 3.23 | 100 | 3.7498 | 1.0 |
| 3.3401 | 6.45 | 200 | 3.2320 | 1.0 |
| 3.2046 | 9.68 | 300 | 3.1741 | 0.9806 |
| 2.4031 | 12.9 | 400 | 1.0579 | 0.8996 |
| 1.0427 | 16.13 | 500 | 0.7989 | 0.7557 |
| 0.741 | 19.35 | 600 | 0.6405 | 0.6299 |
| 0.5699 | 22.58 | 700 | 0.6129 | 0.5928 |
| 0.4607 | 25.81 | 800 | 0.6548 | 0.5695 |
| 0.3827 | 29.03 | 900 | 0.6268 | 0.5190 |
| 0.3282 | 32.26 | 1000 | 0.5919 | 0.5016 |
| 0.2764 | 35.48 | 1100 | 0.5953 | 0.4805 |
| 0.2335 | 38.71 | 1200 | 0.5717 | 0.4728 |
| 0.2106 | 41.94 | 1300 | 0.5674 | 0.4569 |
| 0.1859 | 45.16 | 1400 | 0.5685 | 0.4502 |
| 0.1592 | 48.39 | 1500 | 0.5684 | 0.4402 |
### Framework versions
- Transformers 4.16.1
- Pytorch 1.10.0+cu111
- Datasets 1.18.2
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-hsb-v2 | 368adc84067e85cffae452eb08020e12124b69e6 | 2022-03-24T11:56:48.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"hsb",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-hsb-v2 | 2 | null | transformers | 23,013 | ---
language:
- hsb
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- hsb
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-hsb-v2
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: hsb
metrics:
- name: Test WER
type: wer
value: 0.4654228855721393
- name: Test CER
type: cer
value: 0.11351049990708047
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: hsb
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-hsb-v2
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - HSB dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5328
- Wer: 0.4596
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-hsb-v2 --dataset mozilla-foundation/common_voice_8_0 --config hsb --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Upper Sorbian (hsb) not found in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.00045
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 8.5979 | 3.23 | 100 | 3.5602 | 1.0 |
| 3.303 | 6.45 | 200 | 3.2238 | 1.0 |
| 3.2034 | 9.68 | 300 | 3.2002 | 0.9888 |
| 2.7986 | 12.9 | 400 | 1.2408 | 0.9210 |
| 1.3869 | 16.13 | 500 | 0.7973 | 0.7462 |
| 1.0228 | 19.35 | 600 | 0.6722 | 0.6788 |
| 0.8311 | 22.58 | 700 | 0.6100 | 0.6150 |
| 0.717 | 25.81 | 800 | 0.6236 | 0.6013 |
| 0.6264 | 29.03 | 900 | 0.6031 | 0.5575 |
| 0.5494 | 32.26 | 1000 | 0.5656 | 0.5309 |
| 0.4781 | 35.48 | 1100 | 0.5289 | 0.4996 |
| 0.4311 | 38.71 | 1200 | 0.5375 | 0.4768 |
| 0.3902 | 41.94 | 1300 | 0.5246 | 0.4703 |
| 0.3508 | 45.16 | 1400 | 0.5382 | 0.4696 |
| 0.3199 | 48.39 | 1500 | 0.5328 | 0.4596 |
### Framework versions
- Transformers 4.16.1
- Pytorch 1.10.0+cu111
- Datasets 1.18.2
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-kk-with-LM | 11eae4c4a5518393cf981cc9d7d7781b2149688f | 2022-03-24T11:52:57.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"kk",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-kk-with-LM | 2 | null | transformers | 23,014 | ---
language:
- kk
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- kk
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-kk-with-LM
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: ru
metrics:
- name: Test WER
type: wer
value: 0.4355
- name: Test CER
type: cer
value: 0.10469915859660263
- name: Test WER (+LM)
type: wer
value: 0.417
- name: Test CER (+LM)
type: cer
value: 0.10319098269566598
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: kk
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8.0
type: mozilla-foundation/common_voice_8_0
args: kk
metrics:
- name: Test WER
type: wer
value: 41.7
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Test Data
type: speech-recognition-community-v2/eval_data
args: kk
metrics:
- name: Test WER
type: wer
value: 67.09
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-kk-with-LM
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - KK dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7149
- Wer: 0.451
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-kk-with-LM --dataset mozilla-foundation/common_voice_8_0 --config kk --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Kazakh language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.000222
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 150.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 9.6799 | 9.09 | 200 | 3.6119 | 1.0 |
| 3.1332 | 18.18 | 400 | 2.5352 | 1.005 |
| 1.0465 | 27.27 | 600 | 0.6169 | 0.682 |
| 0.3452 | 36.36 | 800 | 0.6572 | 0.607 |
| 0.2575 | 45.44 | 1000 | 0.6527 | 0.578 |
| 0.2088 | 54.53 | 1200 | 0.6828 | 0.551 |
| 0.158 | 63.62 | 1400 | 0.7074 | 0.5575 |
| 0.1309 | 72.71 | 1600 | 0.6523 | 0.5595 |
| 0.1074 | 81.8 | 1800 | 0.7262 | 0.5415 |
| 0.087 | 90.89 | 2000 | 0.7199 | 0.521 |
| 0.0711 | 99.98 | 2200 | 0.7113 | 0.523 |
| 0.0601 | 109.09 | 2400 | 0.6863 | 0.496 |
| 0.0451 | 118.18 | 2600 | 0.6998 | 0.483 |
| 0.0378 | 127.27 | 2800 | 0.6971 | 0.4615 |
| 0.0319 | 136.36 | 3000 | 0.7119 | 0.4475 |
| 0.0305 | 145.44 | 3200 | 0.7181 | 0.459 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
### Evaluation Command
!python eval.py \
--model_id DrishtiSharma/wav2vec2-xls-r-300m-kk-n2 \
--dataset mozilla-foundation/common_voice_8_0 --config kk --split test --log_outputs |
DrishtiSharma/wav2vec2-large-xls-r-300m-pa-IN-dx1 | 1777871a3a5ca946f9030f8947c997a41bf0c4fa | 2022-03-24T11:52:59.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"pa-IN",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-pa-IN-dx1 | 2 | null | transformers | 23,015 | ---
language:
- pa-IN
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- pa-IN
- robust-speech-event
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-pa-IN-dx1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: pa-IN
metrics:
- name: Test WER
type: wer
value: 0.48725989807918463
- name: Test CER
type: cer
value: 0.1687305197540224
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: pa-IN
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-pa-IN-dx1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - PA-IN dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0855
- Wer: 0.4755
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-pa-IN-dx1 --dataset mozilla-foundation/common_voice_8_0 --config pa-IN --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Punjabi language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1200
- num_epochs: 100.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 3.4607 | 9.26 | 500 | 2.7746 | 1.0416 |
| 0.3442 | 18.52 | 1000 | 0.9114 | 0.5911 |
| 0.2213 | 27.78 | 1500 | 0.9687 | 0.5751 |
| 0.1242 | 37.04 | 2000 | 1.0204 | 0.5461 |
| 0.0998 | 46.3 | 2500 | 1.0250 | 0.5233 |
| 0.0727 | 55.56 | 3000 | 1.1072 | 0.5382 |
| 0.0605 | 64.81 | 3500 | 1.0588 | 0.5073 |
| 0.0458 | 74.07 | 4000 | 1.0818 | 0.5069 |
| 0.0338 | 83.33 | 4500 | 1.0948 | 0.5108 |
| 0.0223 | 92.59 | 5000 | 1.0986 | 0.4775 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-xls-r-300m-kk-n2 | a3503fd07ee232e8656f5a3328164b52d12c82c0 | 2022-03-24T11:54:53.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"kk",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-xls-r-300m-kk-n2 | 2 | null | transformers | 23,016 | ---
language:
- kk
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- kk
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-xls-r-300m-kk-n2
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: tt
metrics:
- name: Test WER
type: wer
value: 0.4355
- name: Test CER
type: cer
value: 0.10469915859660263
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: vot
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-300m-kk-n2
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - KK dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7149
- Wer: 0.451
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-xls-r-300m-kk-n2 --dataset mozilla-foundation/common_voice_8_0 --config kk --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Kazakh language not found in speech-recognition-community-v2/dev_data!
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.000222
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 150.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 9.6799 | 9.09 | 200 | 3.6119 | 1.0 |
| 3.1332 | 18.18 | 400 | 2.5352 | 1.005 |
| 1.0465 | 27.27 | 600 | 0.6169 | 0.682 |
| 0.3452 | 36.36 | 800 | 0.6572 | 0.607 |
| 0.2575 | 45.44 | 1000 | 0.6527 | 0.578 |
| 0.2088 | 54.53 | 1200 | 0.6828 | 0.551 |
| 0.158 | 63.62 | 1400 | 0.7074 | 0.5575 |
| 0.1309 | 72.71 | 1600 | 0.6523 | 0.5595 |
| 0.1074 | 81.8 | 1800 | 0.7262 | 0.5415 |
| 0.087 | 90.89 | 2000 | 0.7199 | 0.521 |
| 0.0711 | 99.98 | 2200 | 0.7113 | 0.523 |
| 0.0601 | 109.09 | 2400 | 0.6863 | 0.496 |
| 0.0451 | 118.18 | 2600 | 0.6998 | 0.483 |
| 0.0378 | 127.27 | 2800 | 0.6971 | 0.4615 |
| 0.0319 | 136.36 | 3000 | 0.7119 | 0.4475 |
| 0.0305 | 145.44 | 3200 | 0.7181 | 0.459 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-xls-r-300m-mt-o1 | e209a6297d63c7d0f0265340804da656eb7e3ea4 | 2022-03-24T11:57:03.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"mt",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-xls-r-300m-mt-o1 | 2 | null | transformers | 23,017 | ---
language:
- mt
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- mt
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-xls-r-300m-mt-o1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: mt
metrics:
- name: Test WER
type: wer
value: 0.2378369069146646
- name: Test CER
type: cer
value: 0.050364163712536256
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: mt
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-300m-mt-o1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - MT dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1987
- Wer: 0.1920
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-xls-r-300m-mt-o1 --dataset mozilla-foundation/common_voice_8_0 --config mt --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Maltese language not found in speech-recognition-community-v2/dev_data!
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 32
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2000
- num_epochs: 100.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 1.1721 | 18.02 | 2000 | 0.3831 | 0.4066 |
| 0.7849 | 36.04 | 4000 | 0.2191 | 0.2417 |
| 0.6723 | 54.05 | 6000 | 0.2056 | 0.2134 |
| 0.6015 | 72.07 | 8000 | 0.2008 | 0.2031 |
| 0.5386 | 90.09 | 10000 | 0.1967 | 0.1953 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-xls-r-300m-rm-sursilv-d11 | c18e9118724b13320d97a2b810f6061b38473b96 | 2022-03-23T18:35:27.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"rm-sursilv",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"hf-asr-leaderboard",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-xls-r-300m-rm-sursilv-d11 | 2 | null | transformers | 23,018 | ---
language:
- rm-sursilv
license: apache-2.0
tags:
- automatic-speech-recognition
- hf-asr-leaderboard
- robust-speech-event
datasets:
- mozilla-foundation/common_voice_8_0
metrics:
- wer
model-index:
- name: wav2vec2-xls-r-300m-rm-sursilv-d11
results:
- task:
type: automatic-speech-recognition
name: Speech Recognition
dataset:
type: mozilla-foundation/common_voice_8_0
name: Common Voice 8
args: rm-sursilv
metrics:
- type: wer
value: 0.24094169578811844
name: Test WER
- name: Test CER
type: cer
value: 0.049832791672554284
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: rm-sursilv
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-300m-rm-sursilv-d11
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - RM-SURSILV dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2511
- Wer: 0.2415
#### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-xls-r-300m-rm-sursilv-d11 --dataset mozilla-foundation/common_voice_8_0 --config rm-sursilv --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Romansh-Sursilv language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2000
- num_epochs: 125.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:-----:|:---------------:|:------:|
| 2.3958 | 17.44 | 1500 | 0.6808 | 0.6521 |
| 0.9663 | 34.88 | 3000 | 0.3023 | 0.3718 |
| 0.7963 | 52.33 | 4500 | 0.2588 | 0.3046 |
| 0.6893 | 69.77 | 6000 | 0.2436 | 0.2718 |
| 0.6148 | 87.21 | 7500 | 0.2521 | 0.2572 |
| 0.5556 | 104.65 | 9000 | 0.2490 | 0.2442 |
| 0.5258 | 122.09 | 10500 | 0.2515 | 0.2442 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-xls-r-300m-rm-vallader-d1 | 0085fe6a858a974f8c2128ff737a16bbdf2232e0 | 2022-03-24T11:57:12.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"rm-vallader",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-xls-r-300m-rm-vallader-d1 | 2 | null | transformers | 23,019 | ---
language:
- rm-vallader
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- rm-vallader
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-xls-r-300m-rm-vallader-d1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: rm-vallader
metrics:
- name: Test WER
type: wer
value: 0.26472007722007723
- name: Test CER
type: cer
value: 0.05860608074430969
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: vot
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-300m-rm-vallader-d1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - RM-VALLADER dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2754
- Wer: 0.2831
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-xls-r-300m-rm-vallader-d1 --dataset mozilla-foundation/common_voice_8_0 --config rm-vallader --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Romansh-Vallader language not found in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7.5e-05
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 100.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 2.927 | 15.15 | 500 | 2.9196 | 1.0 |
| 1.3835 | 30.3 | 1000 | 0.5879 | 0.5866 |
| 0.7415 | 45.45 | 1500 | 0.3077 | 0.3316 |
| 0.5575 | 60.61 | 2000 | 0.2735 | 0.2954 |
| 0.4581 | 75.76 | 2500 | 0.2707 | 0.2802 |
| 0.3977 | 90.91 | 3000 | 0.2785 | 0.2809 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-xls-r-sl-a2 | 15a0866dd66aa8f59734895ee6f947935e9bc110 | 2022-03-24T11:57:17.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"sl",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-xls-r-sl-a2 | 2 | null | transformers | 23,020 | ---
language:
- sl
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- sl
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-xls-r-sl-a2
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: sl
metrics:
- name: Test WER
type: wer
value: 0.21695212999560826
- name: Test CER
type: cer
value: 0.052850080572474256
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: vot
metrics:
- name: Test WER
type: wer
value: 0.560722380639029
- name: Test CER
type: cer
value: 0.2279626093074681
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: sl
metrics:
- name: Test WER
type: wer
value: 56.07
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Test Data
type: speech-recognition-community-v2/eval_data
args: sl
metrics:
- name: Test WER
type: wer
value: 56.19
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-sl-a2
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - SL dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2855
- Wer: 0.2401
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-xls-r-sl-a2 --dataset mozilla-foundation/common_voice_8_0 --config sl --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Votic language not found in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 100.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 6.9294 | 6.1 | 500 | 2.9712 | 1.0 |
| 2.8305 | 12.2 | 1000 | 1.7073 | 0.9479 |
| 1.4795 | 18.29 | 1500 | 0.5756 | 0.6397 |
| 1.3433 | 24.39 | 2000 | 0.4968 | 0.5424 |
| 1.1766 | 30.49 | 2500 | 0.4185 | 0.4743 |
| 1.0017 | 36.59 | 3000 | 0.3303 | 0.3578 |
| 0.9358 | 42.68 | 3500 | 0.3003 | 0.3051 |
| 0.8358 | 48.78 | 4000 | 0.3045 | 0.2884 |
| 0.7647 | 54.88 | 4500 | 0.2866 | 0.2677 |
| 0.7482 | 60.98 | 5000 | 0.2829 | 0.2585 |
| 0.6943 | 67.07 | 5500 | 0.2782 | 0.2478 |
| 0.6586 | 73.17 | 6000 | 0.2911 | 0.2537 |
| 0.6425 | 79.27 | 6500 | 0.2817 | 0.2462 |
| 0.6067 | 85.37 | 7000 | 0.2910 | 0.2436 |
| 0.5974 | 91.46 | 7500 | 0.2875 | 0.2430 |
| 0.5812 | 97.56 | 8000 | 0.2852 | 0.2396 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
|
EasthShin/Chatbot-LisaSimpson-DialoGPT | 575b7cdca4406fd38619563e754a27da546c65f0 | 2021-07-27T09:43:03.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | EasthShin | null | EasthShin/Chatbot-LisaSimpson-DialoGPT | 2 | null | transformers | 23,021 | Entry not found |
Ebtihal/AraDiaBERTo_V1 | a1a25afe637b00847db26a6b15ac04c48aac892b | 2021-09-17T14:04:50.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/AraDiaBERTo_V1 | 2 | null | transformers | 23,022 | Entry not found |
Edresson/wav2vec2-large-100k-voxpopuli-ft-Common-Voice_plus_TTS-Dataset-russian | 4b1e5ac99ae6535f9857936d3b9ae388541e9aec | 2022-07-17T17:39:27.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"ru",
"dataset:Common Voice",
"arxiv:2204.00618",
"transformers",
"audio",
"speech",
"russian-speech-corpus",
"PyTorch",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | Edresson | null | Edresson/wav2vec2-large-100k-voxpopuli-ft-Common-Voice_plus_TTS-Dataset-russian | 2 | 2 | transformers | 23,023 | ---
language: ru
datasets:
- Common Voice
metrics:
- wer
tags:
- audio
- speech
- wav2vec2
- ru
- russian-speech-corpus
- automatic-speech-recognition
- speech
- PyTorch
license: apache-2.0
model-index:
- name: Edresson Casanova Wav2vec2 Large 100k Voxpopuli fine-tuned with Common Voice and M-AILABS in Russian
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
metrics:
- name: Test Common Voice 7.0 WER
type: wer
value: 24.80
---
# Wav2vec2 Large 100k Voxpopuli fine-tuned with Common Voice and M-AILABS in Russian
[Wav2vec2 Large 100k Voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) fine-tuned in Russian using the Common Voice 7.0 and M-AILABS datasets.
# Use this model
```python
from transformers import AutoTokenizer, Wav2Vec2ForCTC
tokenizer = AutoTokenizer.from_pretrained("Edresson/wav2vec2-large-100k-voxpopuli-ft-Common-Voice_plus_TTS-Dataset-russian")
model = Wav2Vec2ForCTC.from_pretrained("Edresson/wav2vec2-large-100k-voxpopuli-ft-Common-Voice_plus_TTS-Dataset-russian")
```
# Results
For the results check the [paper](https://arxiv.org/abs/2204.00618)
# Example test with Common Voice Dataset
```python
import re
import torchaudio
from datasets import load_dataset

# Punctuation stripped before scoring (assumed character set)
chars_to_ignore_regex = r'[,?.!;:"“%‘”\-]'

# Load the Common Voice Russian test split
dataset = load_dataset("common_voice", "ru", split="test", data_dir="./cv-corpus-6.1-2020-12-11")
resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)

def map_to_array(batch):
    speech, _ = torchaudio.load(batch["path"])
    batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
    batch["sampling_rate"] = resampler.new_freq
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
    return batch
```
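The next snippet relies on a `map_to_pred` function and a `wer` metric that are not defined in this card. A minimal sketch of what they could look like, assuming a `Wav2Vec2Processor` can be loaded for this checkpoint (the card itself only loads the tokenizer), is:

```python
import torch
from datasets import load_metric
from transformers import Wav2Vec2Processor

wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained(
    "Edresson/wav2vec2-large-100k-voxpopuli-ft-Common-Voice_plus_TTS-Dataset-russian"
)

def map_to_pred(batch):
    # Feature-extract the resampled audio and run greedy CTC decoding
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    batch["predicted"] = processor.batch_decode(predicted_ids)
    batch["target"] = batch["sentence"]
    return batch
```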
```python
ds = dataset.map(map_to_array)
result = ds.map(map_to_pred, batched=True, batch_size=1, remove_columns=list(ds.features.keys()))
print(wer.compute(predictions=result["predicted"], references=result["target"]))
```
|
Einmalumdiewelt/T5-Large_GNAD | 2b1c2f725e1fbb4f09bd138bd82061894eeb4263 | 2022-01-13T14:48:46.000Z | [
"pytorch",
"t5",
"text2text-generation",
"de",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | Einmalumdiewelt | null | Einmalumdiewelt/T5-Large_GNAD | 2 | null | transformers | 23,024 | ---
language:
- de
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: T5-Large_GNAD
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# T5-Large_GNAD
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.4908
- Rouge1: 23.7414
- Rouge2: 8.4496
- Rougel: 16.7827
- Rougelsum: 19.8331
- Gen Len: 53.14
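As a usage sketch (not part of the original card), the checkpoint can be tried with the standard summarization pipeline; the example article text is a placeholder:

```python
from transformers import pipeline

# German news summarization with this checkpoint
summarizer = pipeline("summarization", model="Einmalumdiewelt/T5-Large_GNAD")
article = "Hier steht ein längerer deutscher Nachrichtenartikel ..."  # placeholder input
print(summarizer(article, max_length=80, min_length=20)[0]["summary_text"])
```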
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
### Framework versions
- Transformers 4.16.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
|
EleutherAI/enformer-191k | 2452a8c484d5dcf9290f5400dc0dc448517b72c8 | 2022-02-23T12:18:31.000Z | [
"pytorch",
"enformer",
"transformers",
"license:apache-2.0"
] | null | false | EleutherAI | null | EleutherAI/enformer-191k | 2 | 1 | transformers | 23,025 | ---
license: apache-2.0
inference: false
---
# Enformer
Enformer model. It was introduced in the paper [Effective gene expression prediction from sequence by integrating long-range interactions.](https://www.nature.com/articles/s41592-021-01252-x) by Avsec et al. and first released in [this repository](https://github.com/deepmind/deepmind-research/tree/master/enformer).
This particular model was trained on sequences of 196,608 base pairs with a target length of 896, with shift augmentation but without reverse complement, using a Poisson loss objective. It reaches a final human Pearson R of ~0.45.
This repo contains the weights of the PyTorch implementation by Phil Wang as seen in the [enformer-pytorch repository](https://github.com/lucidrains/enformer-pytorch).
Disclaimer: The team releasing Enformer did not write a model card for this model so this model card has been written by the Hugging Face team.
## Model description
Enformer is a neural network architecture based on the Transformer that led to greatly increased accuracy in predicting gene expression from DNA sequence.
We refer to the [paper](https://www.nature.com/articles/s41592-021-01252-x) published in Nature for details.
### How to use
Refer to the README of [enformer-pytorch](https://github.com/lucidrains/enformer-pytorch) regarding usage.
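A minimal sketch of what that usage could look like is shown below; the loading helper, input encoding, and output shapes follow the enformer-pytorch README and should be treated as assumptions rather than part of this card:

```python
import torch
from enformer_pytorch import from_pretrained  # see the enformer-pytorch README for the exact API

# Assumed loading helper and repo id; adjust per the enformer-pytorch README.
model = from_pretrained("EleutherAI/enformer-191k")
model.eval()

# Dummy input: 196,608 bp encoded as integer indices (A, C, G, T, N -> 0..4)
seq = torch.randint(0, 5, (1, 196_608))

with torch.no_grad():
    output = model(seq)

print(output["human"].shape)  # expected (1, 896, 5313) human tracks
print(output["mouse"].shape)  # expected (1, 896, 1643) mouse tracks
```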
### Citation info
```
Avsec, Ž., Agarwal, V., Visentin, D. et al. Effective gene expression prediction from sequence by integrating long-range interactions. Nat Methods 18, 1196–1203 (2021). https://doi.org/10.1038/s41592-021-01252-x
``` |
Emi2160/DialoGPT-small-Neku | 23b31e63815f7d61ff93e377c836a9015eae67c9 | 2021-06-03T14:04:12.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Emi2160 | null | Emi2160/DialoGPT-small-Neku | 2 | null | transformers | 23,026 | ---
tags:
- conversational
---
# My Awesome Model |
EmileAjar/DialoGPT-small-harrypotter | 0ecbbd86635a65c31225d7fbc6e6b4e55096a4e1 | 2021-08-28T00:29:03.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | EmileAjar | null | EmileAjar/DialoGPT-small-harrypotter | 2 | null | transformers | 23,027 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
Emran/ClinicalBERT_ICD10_Full_200_epoch | acda3f8ca744e2a97173f71548c282b694f5f7e9 | 2021-10-13T10:57:15.000Z | [
"pytorch",
"bert",
"transformers"
] | null | false | Emran | null | Emran/ClinicalBERT_ICD10_Full_200_epoch | 2 | null | transformers | 23,028 | Entry not found |
Erfan/mT5-base_Farsi_Title_Generator_plus | e56692735a2b28617161def5247f99310a773f64 | 2022-02-10T13:43:30.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Erfan | null | Erfan/mT5-base_Farsi_Title_Generator_plus | 2 | 1 | transformers | 23,029 | Entry not found |
Erfan/mT5-small_Farsi_Title_Generator | b5562ce5028a2fe9553b5e9ef52d569f3ec6c3c9 | 2021-12-11T17:06:05.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"fa",
"transformers",
"Title-Generation",
"autotrain_compatible"
] | text2text-generation | false | Erfan | null | Erfan/mT5-small_Farsi_Title_Generator | 2 | 1 | transformers | 23,030 | ---
language:
- fa
tags:
- Title-Generation
metrics:
- ROUGE
---
|
EstoyDePaso/DialoGPT-small-harrypotter | 503c7390ce9ff0b81da7fe28bcc01f9fe90e5145 | 2021-09-19T19:04:42.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | EstoyDePaso | null | EstoyDePaso/DialoGPT-small-harrypotter | 2 | null | transformers | 23,031 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
Evgen/model_awara_text | 765df7b8e6df26c378dc64b0d6352bed0f9fb878 | 2022-02-09T07:56:40.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | Evgen | null | Evgen/model_awara_text | 2 | null | transformers | 23,032 | Entry not found |
Exilon/DialoGPT-large-quirk | 02c8b42ceda9fb042ee4b5434c6e18e32eb6d3f1 | 2021-12-08T09:37:40.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Exilon | null | Exilon/DialoGPT-large-quirk | 2 | null | transformers | 23,033 | ---
tags:
- conversational
---
# Quirk DialoGPT Model |
EzioDD/house | 820d408b33d56e1dd9358063666d5d2d030dad5c | 2021-12-31T09:41:57.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | EzioDD | null | EzioDD/house | 2 | null | transformers | 23,034 | ---
tags:
- conversational
---
# house small GPT |
FFF000/dialogpt-FFF | b690e50949a48a431cd6f5559baa47478fc7b13f | 2021-12-22T13:21:00.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | FFF000 | null | FFF000/dialogpt-FFF | 2 | null | transformers | 23,035 | ---
tags:
- conversational
---
# FFF dialog model |
FOFer/distilbert-base-uncased-finetuned-squad | f184653a482d15b06332310fc1022f1418b2d0ba | 2022-02-23T04:37:46.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:squad_v2",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | FOFer | null | FOFer/distilbert-base-uncased-finetuned-squad | 2 | null | transformers | 23,036 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad_v2
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad_v2 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4306
## Model description
More information needed
## Intended uses & limitations
More information needed
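As a rough usage sketch (not part of the original card): the checkpoint can be loaded into the standard 🤗 Transformers `question-answering` pipeline for extractive QA. The question and context below are invented examples.

```python
from transformers import pipeline

# Load this fine-tuned checkpoint into an extractive question-answering pipeline.
qa = pipeline("question-answering", model="FOFer/distilbert-base-uncased-finetuned-squad")

result = qa(
    question="Which dataset was the model fine-tuned on?",
    context="The model is a fine-tuned version of distilbert-base-uncased on the squad_v2 dataset.",
)
print(result["answer"], result["score"])
```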
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.2169 | 1.0 | 8235 | 1.1950 |
| 0.9396 | 2.0 | 16470 | 1.2540 |
| 0.7567 | 3.0 | 24705 | 1.4306 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
Film8844/wangchanberta-ner | 95d1a6201412ce0a779dea66327d15863cd7ef08 | 2022-02-15T03:48:10.000Z | [
"pytorch",
"camembert",
"token-classification",
"transformers",
"autotrain_compatible"
] | token-classification | false | Film8844 | null | Film8844/wangchanberta-ner | 2 | null | transformers | 23,037 | Entry not found |
Finnish-NLP/electra-base-generator-finnish | 5e18a40e71b475212511eef55e538f0db186970d | 2022-06-13T16:14:44.000Z | [
"pytorch",
"electra",
"fill-mask",
"fi",
"dataset:Finnish-NLP/mc4_fi_cleaned",
"dataset:wikipedia",
"transformers",
"finnish",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Finnish-NLP | null | Finnish-NLP/electra-base-generator-finnish | 2 | null | transformers | 23,038 | ---
language:
- fi
license: apache-2.0
tags:
- finnish
- electra
datasets:
- Finnish-NLP/mc4_fi_cleaned
- wikipedia
widget:
- text: "Moikka olen [MASK] kielimalli."
---
# ELECTRA for Finnish
Pretrained ELECTRA model on Finnish language using a replaced token detection (RTD) objective. ELECTRA was introduced in
[this paper](https://openreview.net/pdf?id=r1xMH1BtvB)
and first released at [this page](https://github.com/google-research/electra).
**Note**: this model is the ELECTRA generator model intended to be used for the fill-mask task. The ELECTRA discriminator model, intended to be used for fine-tuning on downstream tasks like text classification, is released here: [Finnish-NLP/electra-base-discriminator-finnish](https://huggingface.co/Finnish-NLP/electra-base-discriminator-finnish)
## Model description
Finnish ELECTRA is a transformers model pretrained on a very large corpus of Finnish data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts.
More precisely, it was pretrained with the replaced token detection (RTD) objective. Instead of masking the input like in BERT's masked language modeling (MLM) objective, this approach corrupts the input by replacing some tokens with plausible alternatives sampled from a small generator model. Then, instead of training a model that predicts the original identities of the corrupted tokens, a discriminative model is trained that predicts whether each token in the corrupted input was replaced by a generator model's sample or not. Thus, this training approach resembles Generative Adversarial Nets (GAN).
This way, the model learns an inner representation of the Finnish language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the ELECTRA model as inputs.
## Intended uses & limitations
You can use this generator model mainly just for the fill-mask task. For other tasks, check the [Finnish-NLP/electra-base-discriminator-finnish](https://huggingface.co/Finnish-NLP/electra-base-discriminator-finnish) model instead.
### How to use
Here is how to use this model directly with a pipeline for fill-mask task:
```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='Finnish-NLP/electra-base-generator-finnish')
>>> unmasker("Moikka olen [MASK] kielimalli.")
[{'score': 0.0708453431725502,
'token': 4619,
'token_str': 'suomalainen',
'sequence': 'Moikka olen suomalainen kielimalli.'},
{'score': 0.042563650757074356,
'token': 1153,
'token_str': 'uusi',
'sequence': 'Moikka olen uusi kielimalli.'},
{'score': 0.03219178691506386,
'token': 591,
'token_str': 'hyvä',
'sequence': 'Moikka olen hyvä kielimalli.'},
{'score': 0.03175133094191551,
'token': 3134,
'token_str': 'vanha',
'sequence': 'Moikka olen vanha kielimalli.'},
{'score': 0.019662367179989815,
'token': 25583,
'token_str': 'ranskalainen',
'sequence': 'Moikka olen ranskalainen kielimalli.'}]
```
### Limitations and bias
The training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral. Therefore, the model can have biased predictions. This bias will also affect all fine-tuned versions of this model.
## Training data
This Finnish ELECTRA model was pretrained on the combination of five datasets:
- [mc4_fi_cleaned](https://huggingface.co/datasets/Finnish-NLP/mc4_fi_cleaned), the dataset mC4 is a multilingual colossal, cleaned version of Common Crawl's web crawl corpus. We used the Finnish subset of the mC4 dataset and further cleaned it with our own text data cleaning codes (check the dataset repo).
- [wikipedia](https://huggingface.co/datasets/wikipedia) We used the Finnish subset of the wikipedia (August 2021) dataset
- [Yle Finnish News Archive 2011-2018](http://urn.fi/urn:nbn:fi:lb-2017070501)
- [Finnish News Agency Archive (STT)](http://urn.fi/urn:nbn:fi:lb-2018121001)
- [The Suomi24 Sentences Corpus](http://urn.fi/urn:nbn:fi:lb-2020021803)
Raw datasets were cleaned to filter out bad quality and non-Finnish examples. Together these cleaned datasets were around 84GB of text.
## Training procedure
### Preprocessing
The texts are tokenized using WordPiece and a vocabulary size of 50265. The inputs are sequences of 512 consecutive tokens. Texts are not lower cased so this model is case-sensitive: it makes a difference between finnish and Finnish.
### Pretraining
The model was trained on a TPUv3-8 VM, sponsored by the [Google TPU Research Cloud](https://sites.research.google/trc/about/), for 1M steps. The optimizer used was AdamW with a learning rate of 2e-4, learning rate warmup for 20000 steps and linear decay of the learning rate afterwards.
Training code came from the official [ELECTRA repository](https://github.com/google-research/electra), and some additional instructions were taken from [here](https://github.com/stefan-it/turkish-bert/blob/master/electra/CHEATSHEET.md).
## Evaluation results
For evaluation results, check the [Finnish-NLP/electra-base-discriminator-finnish](https://huggingface.co/Finnish-NLP/electra-base-discriminator-finnish) model repository instead.
## Acknowledgements
This project would not have been possible without compute generously provided by Google through the
[TPU Research Cloud](https://sites.research.google/trc/).
## Team Members
- Aapo Tanskanen, [Hugging Face profile](https://huggingface.co/aapot), [LinkedIn profile](https://www.linkedin.com/in/aapotanskanen/)
- Rasmus Toivanen, [Hugging Face profile](https://huggingface.co/RASMUS), [LinkedIn profile](https://www.linkedin.com/in/rasmustoivanen/)
Feel free to contact us for more details 🤗 |
Firat/distilbert-base-uncased-finetuned-squad | 8f1fb3d867effd2f9d7f71fabb2299f28451e297 | 2022-01-26T19:05:23.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | Firat | null | Firat/distilbert-base-uncased-finetuned-squad | 2 | null | transformers | 23,039 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1460
## Model description
More information needed
## Intended uses & limitations
More information needed
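As an illustrative sketch only (not from the original card), the checkpoint can be queried directly with `AutoModelForQuestionAnswering`; the question and context are invented examples.

```python
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

model_id = "Firat/distilbert-base-uncased-finetuned-squad"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForQuestionAnswering.from_pretrained(model_id)

question = "Where is the Eiffel Tower located?"
context = "The Eiffel Tower is a wrought-iron lattice tower located in Paris, France."

inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Take the most likely start/end token positions and decode the answer span.
start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax())
print(tokenizer.decode(inputs["input_ids"][0][start : end + 1]))
```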
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.2856 | 1.0 | 2767 | 1.1919 |
| 1.012 | 2.0 | 5534 | 1.1332 |
| 0.8512 | 3.0 | 8301 | 1.1460 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1
- Datasets 1.18.0
- Tokenizers 0.10.3
|
FirmanBr/FirmanBrilianBert | 33388e51dc2f9839cacb309e0818d7e13962cba2 | 2021-05-18T18:35:52.000Z | [
"pytorch",
"jax",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | FirmanBr | null | FirmanBr/FirmanBrilianBert | 2 | null | transformers | 23,040 | Entry not found |
FirmanBr/FirmanIndoLanguageModel | 6716a241c2b5c03c63bf6973db988a8509de9464 | 2021-05-18T18:37:51.000Z | [
"pytorch",
"jax",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | FirmanBr | null | FirmanBr/FirmanIndoLanguageModel | 2 | null | transformers | 23,041 | Entry not found |
FitoDS/xls-r-ab-test | 92ad3cab4ee90a4b226b48dcc7bfbbe9c044bca5 | 2022-01-25T13:49:52.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"ab",
"dataset:common_voice",
"transformers",
"common_voice",
"generated_from_trainer",
"model-index"
] | automatic-speech-recognition | false | FitoDS | null | FitoDS/xls-r-ab-test | 2 | null | transformers | 23,042 | ---
language:
- ab
tags:
- automatic-speech-recognition
- common_voice
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: ''
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
#
This model is a fine-tuned version of [hf-test/xls-r-dummy](https://huggingface.co/hf-test/xls-r-dummy) on the COMMON_VOICE - AB dataset.
It achieves the following results on the evaluation set:
- Loss: 133.5167
- Wer: 18.9286
## Model description
More information needed
## Intended uses & limitations
More information needed
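Since this appears to be a test fine-tune of a dummy base model, output quality is not guaranteed; still, a minimal sketch of running it through the standard ASR pipeline would look roughly like this (the audio path is a placeholder and should point to a 16 kHz recording):

```python
from transformers import pipeline

# Minimal sketch: transcribe a local audio file with this checkpoint.
asr = pipeline("automatic-speech-recognition", model="FitoDS/xls-r-ab-test")
print(asr("path/to/your_16khz_audio.wav")["text"])
```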
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 2.0
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.16.0.dev0
- Pytorch 1.10.1+cu102
- Datasets 1.17.1.dev0
- Tokenizers 0.11.0
|
FosterPatch/GoT-test | c7ea37e4ef7593eba404a6bff29c30590e7ca726 | 2021-10-22T22:22:19.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | FosterPatch | null | FosterPatch/GoT-test | 2 | null | transformers | 23,043 | ---
tags:
- conversational
---
# Chat Bot Test |
Francesco/resnet101-224-1k | 3096dc19a0ac27ea94f5af1ba1f44bca7537da2e | 2022-02-23T11:52:02.000Z | [
"pytorch",
"resnet",
"image-classification",
"transformers"
] | image-classification | false | Francesco | null | Francesco/resnet101-224-1k | 2 | null | transformers | 23,044 | Entry not found |
Francesco/resnet26-224-1k | b55f30d2b50829164425846535f40cdfedfa2f95 | 2022-02-23T11:49:59.000Z | [
"pytorch",
"resnet",
"image-classification",
"transformers"
] | image-classification | false | Francesco | null | Francesco/resnet26-224-1k | 2 | null | transformers | 23,045 | Entry not found |
Frederick0291/t5-small-finetuned-billsum | be9187abf042c6fe559f5002187ce42b2550190e | 2021-09-21T08:33:18.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:billsum",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | Frederick0291 | null | Frederick0291/t5-small-finetuned-billsum | 2 | null | transformers | 23,046 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- billsum
metrics:
- rouge
model-index:
- name: t5-small-finetuned-billsum
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: billsum
type: billsum
args: default
metrics:
- name: Rouge1
type: rouge
value: 16.6044
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-billsum
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the billsum dataset.
It achieves the following results on the evaluation set:
- Loss: 2.0972
- Rouge1: 16.6044
- Rouge2: 12.8656
- Rougel: 15.7876
- Rougelsum: 15.9784
- Gen Len: 18.9948
## Model description
More information needed
## Intended uses & limitations
More information needed
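A minimal usage sketch (not from the original card): the checkpoint can be used through the `summarization` pipeline; the input text below is an invented bill-style example.

```python
from transformers import pipeline

# Summarize a short bill-style paragraph with this fine-tuned T5 checkpoint.
summarizer = pipeline("summarization", model="Frederick0291/t5-small-finetuned-billsum")

text = (
    "The bill requires each state agency to publish an annual report describing "
    "its expenditures, the programs it administers, and the outcomes achieved, "
    "and to make the report available to the public on its website."
)
print(summarizer(text, max_length=60, min_length=10, do_sample=False)[0]["summary_text"])
```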
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
| 2.3854 | 1.0 | 2369 | 2.0972 | 16.6044 | 12.8656 | 15.7876 | 15.9784 | 18.9948 |
### Framework versions
- Transformers 4.10.2
- Pytorch 1.9.0+cu102
- Datasets 1.12.1
- Tokenizers 0.10.3
|
Frederick0291/t5-small-finetuned-xsum | 474139923b2abd64e49cb3ec2da8f5d4479816c7 | 2021-09-20T12:01:37.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | Frederick0291 | null | Frederick0291/t5-small-finetuned-xsum | 2 | null | transformers | 23,047 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: t5-small-finetuned-xsum-finetuned-billsum
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-xsum-finetuned-billsum
This model is a fine-tuned version of [Frederick0291/t5-small-finetuned-xsum](https://huggingface.co/Frederick0291/t5-small-finetuned-xsum) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
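As an illustrative sketch only (assuming the usual T5 `summarize:` prefix, which the card itself does not confirm), the checkpoint can also be driven directly with `generate`:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "Frederick0291/t5-small-finetuned-xsum"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

article = (
    "Local officials announced on Tuesday that the city library will be renovated "
    "over the summer, with new reading rooms and extended opening hours planned "
    "for the autumn."
)
inputs = tokenizer("summarize: " + article, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=40, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```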
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
| No log | 1.0 | 330 | 1.8540 | 32.9258 | 14.9104 | 27.1067 | 27.208 | 18.8437 |
### Framework versions
- Transformers 4.10.2
- Pytorch 1.9.0+cu102
- Datasets 1.12.1
- Tokenizers 0.10.3
|
FutureFanatik/DialoGPT-small-rick | 48afbc415d3d378670129d629100ddd323d62d81 | 2021-07-07T04:50:31.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | FutureFanatik | null | FutureFanatik/DialoGPT-small-rick | 2 | null | transformers | 23,048 | Entry not found |
GKLMIP/electra-khmer-base-uncased | 43981afa6280478659b5da14c0986df7c83245a4 | 2021-07-31T05:29:24.000Z | [
"pytorch",
"electra",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | GKLMIP | null | GKLMIP/electra-khmer-base-uncased | 2 | null | transformers | 23,049 | https://github.com/GKLMIP/Pretrained-Models-For-Khmer
If you use our model, please consider citing our paper:
```
@article{,
author="Jiang, Shengyi
and Fu, Sihui
and Lin, Nankai
and Fu, Yingwen",
title="Pre-trained Models and Evaluation Data for the Khmer Language",
year="2021",
publisher="Tsinghua Science and Technology",
}
``` |
GKLMIP/roberta-hindi-devanagari | 01638bde90af5e599d33b30502208648a874b64f | 2021-10-13T13:44:42.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | GKLMIP | null | GKLMIP/roberta-hindi-devanagari | 2 | null | transformers | 23,050 | If you use our model, please consider citing our paper:
```
@InProceedings{,
author="Huang, Xixuan
and Lin, Nankai
and Li, Kexin
and Wang, Lianxi
and Gan SuiFu",
title="HinPLMs: Pre-trained Language Models for Hindi",
booktitle="The International Conference on Asian Language Processing",
year="2021",
publisher="IEEE Xplore"
}
``` |
GPL/bioasq-1m-tsdae-msmarco-distilbert-margin-mse | 939b5b1ca2be943794b05cf86d30ab5fe2f3ab06 | 2022-04-19T16:49:04.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | GPL | null | GPL/bioasq-1m-tsdae-msmarco-distilbert-margin-mse | 2 | null | transformers | 23,051 | Entry not found |
GPL/cqadupstack-msmarco-distilbert-gpl | 6d12956e518a1c997e282b3254b5a668a737e63f | 2022-04-19T15:19:20.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/cqadupstack-msmarco-distilbert-gpl | 2 | null | sentence-transformers | 23,052 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
GPL/cqadupstack-tsdae-msmarco-distilbert-gpl | 41146c3835ea43fa9eead473b834ba93fe367ca4 | 2022-04-19T15:30:49.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/cqadupstack-tsdae-msmarco-distilbert-gpl | 2 | null | sentence-transformers | 23,053 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
GPL/trec-covid-v2-tsdae-msmarco-distilbert-margin-mse | 1e17d93a81469a87506de6deedb95fd934aa4b55 | 2022-04-19T16:49:32.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | GPL | null | GPL/trec-covid-v2-tsdae-msmarco-distilbert-margin-mse | 2 | null | transformers | 23,054 | Entry not found |
Gabriel/paraphrase-multi-mpnet-base-atkins | 1eaa31465ca066c3ba94b49f50e41feb9a1ba92a | 2021-07-22T13:36:01.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | Gabriel | null | Gabriel/paraphrase-multi-mpnet-base-atkins | 2 | null | sentence-transformers | 23,055 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 1526 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```
Parameters of the fit()-Method:
```
{
"callback": null,
"epochs": 10,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": null,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: XLMRobertaModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
Galuh/wav2vec2-large-xlsr-indonesian | 82bbac6b4566ca3f6f5fc3ca5d083cf63a3754d3 | 2021-07-05T14:21:19.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"id",
"dataset:common_voice",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | Galuh | null | Galuh/wav2vec2-large-xlsr-indonesian | 2 | 1 | transformers | 23,056 | ---
language: id
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Indonesian by Galuh
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice id
type: common_voice
args: id
metrics:
- name: Test WER
type: wer
value: 21.07
---
# Wav2Vec2-Large-XLSR-Indonesian
This is the model for Wav2Vec2-Large-XLSR-Indonesian, a
[facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53)
model fine-tuned on the [Indonesian Common Voice dataset](https://huggingface.co/datasets/common_voice).
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "id", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("Galuh/wav2vec2-large-xlsr-indonesian")
model = Wav2Vec2ForCTC.from_pretrained("Galuh/wav2vec2-large-xlsr-indonesian")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Indonesian test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "id", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("Galuh/wav2vec2-large-xlsr-indonesian")
model = Wav2Vec2ForCTC.from_pretrained("Galuh/wav2vec2-large-xlsr-indonesian")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\'\”\�]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
resampler = torchaudio.transforms.Resample(sampling_rate, 16_000)
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Evaluating the model.
# We run inference on the preprocessed audio arrays and collect the predictions
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 18.32 %
## Training
The Common Voice `train`, `validation`, and ... datasets were used for training as well as ... and ... # TODO
The script used for training can be found [here](https://github.com/galuhsahid/wav2vec2-indonesian)
(will be available soon) |
Galuh/xlsr-indonesian | f6f8eb90a12bcae47cd9bdf20f6f389f3ab680df | 2021-07-05T14:23:33.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Galuh | null | Galuh/xlsr-indonesian | 2 | null | transformers | 23,057 | Entry not found |
Gantenbein/ADDI-CH-RoBERTa | efe56ae4e09394789879dfe9dea4f1f7814f45c8 | 2021-06-01T13:54:05.000Z | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Gantenbein | null | Gantenbein/ADDI-CH-RoBERTa | 2 | null | transformers | 23,058 | Entry not found |
Gantenbein/ADDI-FI-GPT2 | 9f926ca25b438424f288e6448bf7a17a9dc596d9 | 2021-06-01T14:11:36.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Gantenbein | null | Gantenbein/ADDI-FI-GPT2 | 2 | null | transformers | 23,059 | Entry not found |
Gantenbein/ADDI-IT-GPT2 | 060a8d4deecb6e8dd1f14de834a333126652f8d5 | 2021-06-01T14:25:36.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Gantenbein | null | Gantenbein/ADDI-IT-GPT2 | 2 | null | transformers | 23,060 | Entry not found |
Gantenbein/ADDI-IT-RoBERTa | 0236f56f1121dffcf100d4d4c894855c3a749c4d | 2021-06-01T14:25:12.000Z | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Gantenbein | null | Gantenbein/ADDI-IT-RoBERTa | 2 | null | transformers | 23,061 | Entry not found |
Gappy/DialoGPT-small-Zhongli | 4d8dc71ec00406eb5a8e605cdcd151f29c1e206c | 2021-09-06T02:34:12.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Gappy | null | Gappy/DialoGPT-small-Zhongli | 2 | null | transformers | 23,062 | ---
tags:
- conversational
---
# Zhongli DialoGPT Model |
Gastron/asr-crdnn-librispeech | 5aa5a52d9d8a87463ae40d3d8a2c5443bf9945ee | 2021-02-26T15:23:04.000Z | [
"en",
"dataset:librispeech",
"ASR",
"CTC",
"Attention",
"pytorch",
"license:apache-2.0"
] | null | false | Gastron | null | Gastron/asr-crdnn-librispeech | 2 | null | null | 23,063 | ---
language: "en"
thumbnail:
tags:
- ASR
- CTC
- Attention
- pytorch
license: "apache-2.0"
datasets:
- librispeech
metrics:
- wer
- cer
---
# CRDNN with CTC/Attention and RNNLM trained on LibriSpeech
This repository provides all the necessary tools to perform automatic speech
recognition from an end-to-end system pretrained on LibriSpeech (EN) within
SpeechBrain. For a better experience we encourage you to learn more about
[SpeechBrain](https://speechbrain.github.io). The performance of the given ASR model is:
| Release | hyperparams file | Test WER | Model link | GPUs |
|:-------------:|:---------------------------:| -----:| -----:| --------:|
| 20-05-22 | BPE_1000.yaml | 3.08 | Not Available | 1xV100 32GB |
| 20-05-22 | BPE_5000.yaml | 2.89 | Not Available | 1xV100 32GB |
## Pipeline description
This ASR system is composed of 3 different but linked blocks:
1. Tokenizer (unigram) that transforms words into subword units and is trained on
the training transcriptions of LibriSpeech.
2. Neural language model (RNNLM) trained on the full 10M words dataset.
3. Acoustic model (CRDNN + CTC/Attention). The CRDNN architecture is made of
N blocks of convolutional neural networks with normalisation and pooling on the
frequency domain. Then, a bidirectional LSTM is connected to a final DNN to obtain
the final acoustic representation that is given to the CTC and attention decoders.
## Intended uses & limitations
This model has been primarily developed to be run within SpeechBrain as a pretrained ASR model
for the English language. Thanks to the flexibility of SpeechBrain, any of the 3 blocks
detailed above can be extracted and connected to your custom pipeline as long as SpeechBrain is
installed.
## Install SpeechBrain
First of all, please install SpeechBrain with the following command:
```
pip install \\we hide ! SpeechBrain is still private :p
```
Also, for this model, you need SentencePiece. Install with
```
pip install sentencepiece
```
Please note that we encourage you to read our tutorials and learn more about
[SpeechBrain](https://speechbrain.github.io).
### Transcribing your own audio files
```python
from speechbrain.pretrained import EncoderDecoderASR
asr_model = EncoderDecoderASR.from_hparams(source="Gastron/asr-crdnn-librispeech")
asr_model.transcribe_file("path_to_your_file.wav")
```
### Obtaining encoded features
The SpeechBrain EncoderDecoderASR() class also provides an easy way to encode
the speech signal without running the decoding phase by calling
``EncoderDecoderASR.encode_batch()``
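A rough sketch of what this could look like, assuming `encode_batch` takes a batch of waveforms together with their relative lengths and that `load_audio` is available on the pretrained interface (treat the exact call signature as an assumption, and the file path as a placeholder):

```python
import torch
from speechbrain.pretrained import EncoderDecoderASR

asr_model = EncoderDecoderASR.from_hparams(source="Gastron/asr-crdnn-librispeech")

# Load a waveform and build a batch of size 1 with its relative length.
signal = asr_model.load_audio("path_to_your_file.wav")  # placeholder path
batch = signal.unsqueeze(0)
rel_lengths = torch.tensor([1.0])

# Encoded acoustic features, without running the CTC/attention decoders.
encoded = asr_model.encode_batch(batch, rel_lengths)
print(encoded.shape)
```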
#### Referencing SpeechBrain
```
@misc{SB2021,
author = {Ravanelli, Mirco and Parcollet, Titouan and Rouhe, Aku and Plantinga, Peter and Rastorgueva, Elena and Lugosch, Loren and Dawalatabad, Nauman and Ju-Chieh, Chou and Heba, Abdel and Grondin, Francois and Aris, William and Liao, Chien-Feng and Cornell, Samuele and Yeh, Sung-Lin and Na, Hwidong and Gao, Yan and Fu, Szu-Wei and Subakan, Cem and De Mori, Renato and Bengio, Yoshua },
title = {SpeechBrain},
year = {2021},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/speechbrain/speechbrain}},
}
```
|
GenDelport/DialoGPT-small-harrypotter | fb1f22b02d751dd7cc1569751c1d4c352464fa05 | 2021-09-03T10:59:02.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | GenDelport | null | GenDelport/DialoGPT-small-harrypotter | 2 | null | transformers | 23,064 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
Geotrend/bert-base-en-fr-da-ja-vi-cased | 71f618d9117833262b27aac3d3c88f4a50d80b9b | 2021-05-18T19:17:37.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/bert-base-en-fr-da-ja-vi-cased | 2 | null | transformers | 23,065 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# bert-base-en-fr-da-ja-vi-cased
We are sharing smaller versions of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) that handle a custom number of languages.
Unlike [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased), our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/bert-base-en-fr-da-ja-vi-cased")
model = AutoModel.from_pretrained("Geotrend/bert-base-en-fr-da-ja-vi-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/bert-base-en-fr-lt-no-pl-cased | a8b3f756feedc85f96f33fe759f4270e70d0a3ff | 2021-05-18T19:25:38.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/bert-base-en-fr-lt-no-pl-cased | 2 | null | transformers | 23,066 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# bert-base-en-fr-lt-no-pl-cased
We are sharing smaller versions of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) that handle a custom number of languages.
Unlike [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased), our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/bert-base-en-fr-lt-no-pl-cased")
model = AutoModel.from_pretrained("Geotrend/bert-base-en-fr-lt-no-pl-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/bert-base-en-it-cased | 72d9a5cf8570ad18f06b8bb77e64477486ba7a05 | 2021-05-18T19:32:13.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/bert-base-en-it-cased | 2 | null | transformers | 23,067 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# bert-base-en-it-cased
We are sharing smaller versions of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) that handle a custom number of languages.
Unlike [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased), our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/bert-base-en-it-cased")
model = AutoModel.from_pretrained("Geotrend/bert-base-en-it-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/bert-base-en-lt-cased | c9bd56e49e971fb6a5e439d26d9390840732f2dd | 2021-05-18T19:38:31.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/bert-base-en-lt-cased | 2 | null | transformers | 23,068 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# bert-base-en-lt-cased
We are sharing smaller versions of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) that handle a custom number of languages.
Unlike [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased), our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/bert-base-en-lt-cased")
model = AutoModel.from_pretrained("Geotrend/bert-base-en-lt-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/bert-base-en-no-cased | d6e909aec37efb6e101ebf1800c2a1205751aea8 | 2021-05-18T19:40:40.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/bert-base-en-no-cased | 2 | null | transformers | 23,069 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# bert-base-en-no-cased
We are sharing smaller versions of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) that handle a custom number of languages.
Unlike [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased), our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/bert-base-en-no-cased")
model = AutoModel.from_pretrained("Geotrend/bert-base-en-no-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/bert-base-ja-cased | 5c7f7560b2aadda499a0ac10c1fb41e86b462290 | 2021-05-18T19:59:21.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"ja",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/bert-base-ja-cased | 2 | null | transformers | 23,070 | ---
language: ja
datasets: wikipedia
license: apache-2.0
---
# bert-base-ja-cased
We are sharing smaller versions of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) that handle a custom number of languages.
Unlike [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased), our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/bert-base-ja-cased")
model = AutoModel.from_pretrained("Geotrend/bert-base-ja-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-bg-cased | 323e10664c9cee3ebebdc4843a283e24af25c693 | 2021-08-16T13:25:28.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"bg",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-bg-cased | 2 | null | transformers | 23,071 | ---
language: bg
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-bg-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-bg-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-bg-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-el-cased | dd57977d64ccc470381a6a78244b2c78c5e6af59 | 2021-08-16T13:17:43.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"el",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-el-cased | 2 | null | transformers | 23,072 | ---
language: el
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-el-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-el-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-el-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-en-bg-cased | f9e571f75c2594fbfbc63ea90fce85172b09a4dd | 2021-08-16T14:06:03.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-en-bg-cased | 2 | null | transformers | 23,073 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-en-bg-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations produced by the original model which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-en-bg-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-en-bg-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-en-da-cased | 216e0be2c16626c14461b17aad12118a791b8a83 | 2021-07-29T10:29:09.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-en-da-cased | 2 | null | transformers | 23,074 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-en-da-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-en-da-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-en-da-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-en-fr-ar-cased | 6dcb66288e834c5d6649f9f35175b2d9de3fc69d | 2021-07-27T12:29:21.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-en-fr-ar-cased | 2 | null | transformers | 23,075 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-en-fr-ar-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-en-fr-ar-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-en-fr-ar-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-en-fr-uk-el-ro-cased | c638ac53446028684f6f68a7931ffa61e5053346 | 2021-07-28T13:34:16.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-en-fr-uk-el-ro-cased | 2 | 1 | transformers | 23,076 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-en-fr-uk-el-ro-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-en-fr-uk-el-ro-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-en-fr-uk-el-ro-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-en-fr-zh-cased | c79c91029c5fcfcf34707f3e26f83e77f6a2f008 | 2021-07-28T12:29:43.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-en-fr-zh-cased | 2 | null | transformers | 23,077 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-en-fr-zh-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-en-fr-zh-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-en-fr-zh-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-en-pt-cased | f6611bb4e0e87d191cb5d1ca27030dc011c1f87b | 2021-07-29T10:53:12.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-en-pt-cased | 2 | null | transformers | 23,078 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-en-pt-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-en-pt-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-en-pt-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-en-sw-cased | 44a731342050f180223ba131bce366c7ec00964b | 2021-08-16T13:49:20.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-en-sw-cased | 2 | null | transformers | 23,079 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-en-sw-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-en-sw-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-en-sw-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-en-vi-cased | bd39086e3dc2e66859f0b9a3b377eb62b6a8655b | 2021-08-16T13:45:28.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"multilingual",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-en-vi-cased | 2 | null | transformers | 23,080 | ---
language: multilingual
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-en-vi-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-en-vi-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-en-vi-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-it-cased | c5201e9308ca4b5c63965ffeb4e1b226cd9a6df3 | 2021-07-27T07:08:03.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"it",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-it-cased | 2 | 1 | transformers | 23,081 | ---
language: it
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-it-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-it-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-it-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-lt-cased | 120fdff0f4996dba5125cdb3aff88c4dd1558931 | 2021-07-27T08:43:07.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"lt",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-lt-cased | 2 | null | transformers | 23,082 | ---
language: lt
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-lt-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-lt-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-lt-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-no-cased | b16afc0586b623fbf1ccea573e25954c274e6542 | 2021-07-27T09:05:34.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"no",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-no-cased | 2 | null | transformers | 23,083 | ---
language: no
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-no-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-no-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-no-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Geotrend/distilbert-base-tr-cased | 9c97458cc862e614e242d7d84e5700146b736d0d | 2021-08-16T13:20:04.000Z | [
"pytorch",
"distilbert",
"fill-mask",
"tr",
"dataset:wikipedia",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | false | Geotrend | null | Geotrend/distilbert-base-tr-cased | 2 | null | transformers | 23,084 | ---
language: tr
datasets: wikipedia
license: apache-2.0
---
# distilbert-base-tr-cased
We are sharing smaller versions of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) that handle a custom number of languages.
Our versions give exactly the same representations as those produced by the original model, which preserves the original accuracy.
For more information please visit our paper: [Load What You Need: Smaller Versions of Multilingual BERT](https://www.aclweb.org/anthology/2020.sustainlp-1.16.pdf).
## How to use
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("Geotrend/distilbert-base-tr-cased")
model = AutoModel.from_pretrained("Geotrend/distilbert-base-tr-cased")
```
To generate other smaller versions of multilingual transformers please visit [our Github repo](https://github.com/Geotrend-research/smaller-transformers).
### How to cite
```bibtex
@inproceedings{smallermdistilbert,
title={Load What You Need: Smaller Versions of Multilingual BERT},
author={Abdaoui, Amine and Pradel, Camille and Sigel, Grégoire},
booktitle={SustaiNLP / EMNLP},
year={2020}
}
```
## Contact
Please contact [email protected] for any question, feedback or request. |
Gigworks/ASR_id | f07b56be72e9340c0664ff9b3e294c6b14f453f4 | 2021-10-22T07:28:30.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Gigworks | null | Gigworks/ASR_id | 2 | null | transformers | 23,085 | # Wav2Vec2-Large-XLSR-Indonesian
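This card ships no usage snippet, so here is a minimal, hedged transcription sketch (not part of the original card); the audio path is a placeholder and 16 kHz mono input is an assumption. The fine-tuning base is noted right after the example.
```python
from transformers import pipeline

# Sketch only: "sample_id.wav" is a placeholder path; 16 kHz mono audio is assumed.
asr = pipeline("automatic-speech-recognition", model="Gigworks/ASR_id")
print(asr("sample_id.wav")["text"])
```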
Fine-tuned from facebook/wav2vec2-large-xlsr-53 for Indonesian automatic speech recognition. |
GusNicho/distilbert-base-cased-finetuned | 5f55c0fcae35241adc1c48224e476ce3c6caf47a | 2022-01-12T07:41:34.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"fill-mask",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | fill-mask | false | GusNicho | null | GusNicho/distilbert-base-cased-finetuned | 2 | null | transformers | 23,086 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-cased-finetuned
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-cased-finetuned
This model is a fine-tuned version of [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9161
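For orientation only (this is not part of the original card), a minimal masked-language-modelling sketch; the example sentence is invented and the checkpoint id is taken from this repository.
```python
from transformers import pipeline

# Hypothetical example sentence; distilbert-base-cased uses the [MASK] token.
fill_mask = pipeline("fill-mask", model="GusNicho/distilbert-base-cased-finetuned")
for pred in fill_mask("The report was [MASK] by the committee last week."):
    print(f"{pred['token_str']:>12}  score={pred['score']:.3f}")
```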
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.3101 | 1.0 | 974 | 2.0502 |
| 2.0831 | 2.0 | 1948 | 1.9627 |
| 2.0198 | 3.0 | 2922 | 1.8998 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.9.1
- Datasets 1.16.1
- Tokenizers 0.10.3
|
Haechang/t5-small-finetuned-xsum | 883137eee6e5ee4def8f93eb64b5cbabed116781 | 2022-01-21T12:15:28.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Haechang | null | Haechang/t5-small-finetuned-xsum | 2 | null | transformers | 23,087 | Entry not found |
HaitaoYang/bert_cn_bi-classification | e04bcc3a00da91ad4d98dd7c414d751a592d1a20 | 2021-09-04T11:14:00.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | HaitaoYang | null | HaitaoYang/bert_cn_bi-classification | 2 | null | transformers | 23,088 | Entry not found |
Hamas/DialoGPT-large-jake | 7d77c3867a357460e196cd58b45f51e55f96c158 | 2021-09-26T05:28:55.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Hamas | null | Hamas/DialoGPT-large-jake | 2 | null | transformers | 23,089 | ---
tags:
- conversational
---
# Jake DialoGPT-large-jake
|
HansAnonymous/DialoGPT-small-shrek | a2260161a6c331cc154dff971d6d6d96dc8130ed | 2021-09-02T04:24:57.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | HansAnonymous | null | HansAnonymous/DialoGPT-small-shrek | 2 | null | transformers | 23,090 | ---
tags:
- conversational
---
# Shrek from Shrek DialoGPT Model |
Haotian/distilgpt2-finetuned-wikitext2 | 07199338295b189f71f10ba951619a1472d87e49 | 2021-09-22T12:24:29.000Z | [
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-generation | false | Haotian | null | Haotian/distilgpt2-finetuned-wikitext2 | 2 | null | transformers | 23,091 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: distilgpt2-finetuned-wikitext2
results:
- task:
name: Causal Language Modeling
type: text-generation
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilgpt2-finetuned-wikitext2
This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unspecified dataset (presumably WikiText-2, given the model name).
It achieves the following results on the evaluation set:
- Loss: 3.6424
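As a quick orientation (not part of the original card), a hedged generation sketch; the prompt and sampling settings are arbitrary illustrative choices.
```python
from transformers import pipeline

# Prompt and sampling parameters are illustrative only.
generator = pipeline("text-generation", model="Haotian/distilgpt2-finetuned-wikitext2")
out = generator(
    "The history of natural language processing",
    max_new_tokens=40,
    do_sample=True,
    top_p=0.9,
)
print(out[0]["generated_text"])
```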
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.7608 | 1.0 | 2334 | 3.6655 |
| 3.6335 | 2.0 | 4668 | 3.6455 |
| 3.6066 | 3.0 | 7002 | 3.6424 |
### Framework versions
- Transformers 4.10.2
- Pytorch 1.9.0+cu102
- Datasets 1.12.0
- Tokenizers 0.10.3
|
HarrisDePerceptron/xls-r-300m-ur | 5679334b30e2d2fd366a8db400ee787407996e01 | 2022-03-24T11:51:43.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"ur",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | HarrisDePerceptron | null | HarrisDePerceptron/xls-r-300m-ur | 2 | null | transformers | 23,092 | ---
language:
- ur
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- ur
- robust-speech-event
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: ''
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8.0
type: mozilla-foundation/common_voice_8_0
args: ur
metrics:
- name: Test WER
type: wer
value: 47.38
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xls-r-300m-ur
This model is a fine-tuned version of [HarrisDePerceptron/xls-r-300m-ur](https://huggingface.co/HarrisDePerceptron/xls-r-300m-ur) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - UR dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0517
- WER: 0.5151291512915129
- CER: 0.23689640940982254
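For context, a hedged sketch of how WER/CER figures like those above are typically computed for a checkpoint of this kind; the audio file and reference transcript are placeholders, and greedy CTC decoding is an assumption rather than documented behaviour.
```python
import torch
import librosa
from jiwer import wer, cer
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

# Sketch only: placeholder audio/reference; greedy CTC decoding assumed.
processor = Wav2Vec2Processor.from_pretrained("HarrisDePerceptron/xls-r-300m-ur")
model = Wav2Vec2ForCTC.from_pretrained("HarrisDePerceptron/xls-r-300m-ur")

speech, _ = librosa.load("urdu_sample.wav", sr=16_000)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
hypothesis = processor.batch_decode(torch.argmax(logits, dim=-1))[0]

reference = "placeholder reference transcript"
print("WER:", wer(reference, hypothesis), "CER:", cer(reference, hypothesis))
```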
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7.5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- num_epochs: 100.0
- mixed_precision_training: Native AMP
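As a rough illustration only (the actual training script is not published in this card), the hyperparameters above map onto Hugging Face `TrainingArguments` approximately as follows; the values are copied from the list above, and everything else about the setup is an assumption.
```python
from transformers import TrainingArguments

# Approximate mapping of the listed hyperparameters; not the author's actual script.
training_args = TrainingArguments(
    output_dir="xls-r-300m-ur",        # placeholder output directory
    learning_rate=7.5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,     # effective train batch size of 16
    warmup_steps=100,
    num_train_epochs=100,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,                         # "Native AMP" mixed precision
)
```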
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 1.2991 | 1.96 | 100 | 0.9769 | 0.6627 |
| 1.3415 | 3.92 | 200 | 0.9701 | 0.6594 |
| 1.2998 | 5.88 | 300 | 0.9678 | 0.6668 |
| 1.2881 | 7.84 | 400 | 0.9650 | 0.6613 |
| 1.2369 | 9.8 | 500 | 0.9392 | 0.6502 |
| 1.2293 | 11.76 | 600 | 0.9536 | 0.6480 |
| 1.1709 | 13.73 | 700 | 0.9265 | 0.6402 |
| 1.1492 | 15.69 | 800 | 0.9636 | 0.6506 |
| 1.1044 | 17.65 | 900 | 0.9305 | 0.6351 |
| 1.0704 | 19.61 | 1000 | 0.9329 | 0.6280 |
| 1.0039 | 21.57 | 1100 | 0.9413 | 0.6295 |
| 0.9756 | 23.53 | 1200 | 0.9718 | 0.6185 |
| 0.9633 | 25.49 | 1300 | 0.9731 | 0.6133 |
| 0.932 | 27.45 | 1400 | 0.9659 | 0.6199 |
| 0.9252 | 29.41 | 1500 | 0.9766 | 0.6196 |
| 0.9172 | 31.37 | 1600 | 1.0052 | 0.6199 |
| 0.8733 | 33.33 | 1700 | 0.9955 | 0.6203 |
| 0.868 | 35.29 | 1800 | 1.0069 | 0.6240 |
| 0.8547 | 37.25 | 1900 | 0.9783 | 0.6258 |
| 0.8451 | 39.22 | 2000 | 0.9845 | 0.6052 |
| 0.8374 | 41.18 | 2100 | 0.9496 | 0.6137 |
| 0.8153 | 43.14 | 2200 | 0.9756 | 0.6122 |
| 0.8134 | 45.1 | 2300 | 0.9712 | 0.6096 |
| 0.8019 | 47.06 | 2400 | 0.9565 | 0.5970 |
| 0.7746 | 49.02 | 2500 | 0.9864 | 0.6096 |
| 0.7664 | 50.98 | 2600 | 0.9988 | 0.6092 |
| 0.7708 | 52.94 | 2700 | 1.0181 | 0.6255 |
| 0.7468 | 54.9 | 2800 | 0.9918 | 0.6148 |
| 0.7241 | 56.86 | 2900 | 1.0150 | 0.6018 |
| 0.7165 | 58.82 | 3000 | 1.0439 | 0.6063 |
| 0.7104 | 60.78 | 3100 | 1.0016 | 0.6037 |
| 0.6954 | 62.75 | 3200 | 1.0117 | 0.5970 |
| 0.6753 | 64.71 | 3300 | 1.0191 | 0.6037 |
| 0.6803 | 66.67 | 3400 | 1.0190 | 0.6033 |
| 0.661 | 68.63 | 3500 | 1.0284 | 0.6007 |
| 0.6597 | 70.59 | 3600 | 1.0060 | 0.5967 |
| 0.6398 | 72.55 | 3700 | 1.0372 | 0.6048 |
| 0.6105 | 74.51 | 3800 | 1.0048 | 0.6044 |
| 0.6164 | 76.47 | 3900 | 1.0398 | 0.6148 |
| 0.6354 | 78.43 | 4000 | 1.0272 | 0.6133 |
| 0.5952 | 80.39 | 4100 | 1.0364 | 0.6081 |
| 0.5814 | 82.35 | 4200 | 1.0418 | 0.6092 |
| 0.6079 | 84.31 | 4300 | 1.0277 | 0.5967 |
| 0.5748 | 86.27 | 4400 | 1.0362 | 0.6041 |
| 0.5624 | 88.24 | 4500 | 1.0427 | 0.6007 |
| 0.5767 | 90.2 | 4600 | 1.0370 | 0.5919 |
| 0.5793 | 92.16 | 4700 | 1.0442 | 0.6011 |
| 0.547 | 94.12 | 4800 | 1.0516 | 0.5982 |
| 0.5513 | 96.08 | 4900 | 1.0461 | 0.5989 |
| 0.5429 | 98.04 | 5000 | 1.0504 | 0.5996 |
| 0.5404 | 100.0 | 5100 | 1.0517 | 0.5967 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.3
- Tokenizers 0.11.0
|
Harveenchadha/vakyansh-wav2vec2-assamese-asm-8 | eaf8f63ce2a5845351e27a26ec9c0c36a1482bd1 | 2021-12-17T17:42:49.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Harveenchadha | null | Harveenchadha/vakyansh-wav2vec2-assamese-asm-8 | 2 | null | transformers | 23,093 | Entry not found |
Harveenchadha/vakyansh-wav2vec2-bhojpuri-bhom-60 | 3802703b24e0583de4eb7067d30acf3404fd0fde | 2021-12-17T17:46:21.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Harveenchadha | null | Harveenchadha/vakyansh-wav2vec2-bhojpuri-bhom-60 | 2 | null | transformers | 23,094 | Entry not found |
Harveenchadha/vakyansh-wav2vec2-gujarati-gnm-100 | 4466c36642f5bf5390aa938fa53f94b09c741285 | 2021-08-02T18:46:40.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Harveenchadha | null | Harveenchadha/vakyansh-wav2vec2-gujarati-gnm-100 | 2 | null | transformers | 23,095 | Entry not found |
Harveenchadha/vakyansh-wav2vec2-kannada-knm-560 | 0564782ee3f5a47f1db8af94bcdc942bb0d5bb29 | 2021-08-02T18:52:55.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Harveenchadha | null | Harveenchadha/vakyansh-wav2vec2-kannada-knm-560 | 2 | null | transformers | 23,096 | Entry not found |
Harveenchadha/vakyansh-wav2vec2-malayalam-mlm-8 | 0bddf3600b2d0f327e7316a49f265e57f7d95400 | 2021-12-17T17:50:07.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Harveenchadha | null | Harveenchadha/vakyansh-wav2vec2-malayalam-mlm-8 | 2 | null | transformers | 23,097 | Entry not found |
Harveenchadha/vakyansh-wav2vec2-marathi-mrm-100 | 82f3b0fac2c26ffd8965a031a90ad61132b233ee | 2021-12-17T17:51:20.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Harveenchadha | null | Harveenchadha/vakyansh-wav2vec2-marathi-mrm-100 | 2 | null | transformers | 23,098 | Entry not found |
Harveenchadha/vakyansh-wav2vec2-odia-orm-100 | 9598398daf887755045258267702854ec8831066 | 2021-12-17T17:54:13.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Harveenchadha | null | Harveenchadha/vakyansh-wav2vec2-odia-orm-100 | 2 | null | transformers | 23,099 | Entry not found |