modelId | sha | lastModified | tags | pipeline_tag | private | author | config | id | downloads | likes | library_name | __index_level_0__ | readme |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Declan/CNN_model_v2 | d5a8174e55e7fde3f240b77439c02543622bdc89 | 2021-12-15T11:22:14.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/CNN_model_v2 | 1 | null | transformers | 27,900 | Entry not found |
Declan/CNN_model_v6 | a427f7e856952168b81c43bf181e273db0ab97aa | 2021-12-19T11:06:00.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/CNN_model_v6 | 1 | null | transformers | 27,901 | Entry not found |
Declan/ChicagoTribune_model_v2 | c32e7515831b0eeccb4d36113682253442f503d6 | 2021-12-12T06:24:18.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/ChicagoTribune_model_v2 | 1 | null | transformers | 27,902 | Entry not found |
Declan/ChicagoTribune_model_v4 | 92dad9eae4d8be3e2bd3446d95ccbf02d3b88894 | 2021-12-15T09:02:19.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/ChicagoTribune_model_v4 | 1 | null | transformers | 27,903 | Entry not found |
Declan/ChicagoTribune_model_v6 | 5ffeeeacfcf6312fc01f251f654b03b045c0aa41 | 2021-12-15T10:23:48.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/ChicagoTribune_model_v6 | 1 | null | transformers | 27,904 | Entry not found |
Declan/FoxNews_model_v5 | b5ce9836103ba797733e45257c1aec3426dc308f | 2021-12-15T15:59:05.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/FoxNews_model_v5 | 1 | null | transformers | 27,905 | Entry not found |
Declan/HuffPost_model_v4 | df5b93956f7acceebc5ed68adf2830343c164a56 | 2021-12-15T18:04:12.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/HuffPost_model_v4 | 1 | null | transformers | 27,906 | Entry not found |
Declan/NPR_model_v4 | 8c7832568b01d14d775f61488808513814e38323 | 2021-12-16T02:25:21.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/NPR_model_v4 | 1 | null | transformers | 27,907 | Entry not found |
Declan/NPR_model_v6 | 3deb0aa7cc5d3a77c8156cb3d0573038be3aac32 | 2021-12-19T13:58:50.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/NPR_model_v6 | 1 | null | transformers | 27,908 | Entry not found |
Declan/NewYorkTimes_model_v6 | 99997910d35b4fb66b84d11210e22aa03ebb29be | 2021-12-19T14:56:06.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/NewYorkTimes_model_v6 | 1 | null | transformers | 27,909 | Entry not found |
Declan/Reuters_model_v3 | 7c6b304da9cb376c6edb2a22b6bf807936c85aab | 2021-12-16T10:45:52.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/Reuters_model_v3 | 1 | null | transformers | 27,910 | Entry not found |
Declan/Reuters_model_v4 | 18701293bceb0fd657739329c6884ea5605c7898 | 2021-12-16T18:21:25.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/Reuters_model_v4 | 1 | null | transformers | 27,911 | Entry not found |
Declan/Reuters_model_v6 | 7e624b0a217cf606043e1e812fff07613937674a | 2021-12-19T17:02:16.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/Reuters_model_v6 | 1 | null | transformers | 27,912 | Entry not found |
Declan/WallStreetJournal_model_v4 | a17111519600169856c2885d570713fc169ea9ed | 2021-12-18T01:06:27.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Declan | null | Declan/WallStreetJournal_model_v4 | 1 | null | transformers | 27,913 | Entry not found |
Denny29/DialoGPT-medium-asunayuuki | 8796e80932953e58dce830f4d764954cf04edf9f | 2021-09-23T09:34:32.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Denny29 | null | Denny29/DialoGPT-medium-asunayuuki | 1 | null | transformers | 27,914 | ---
tags:
- conversational
---
# Asuna Yuuki DialoGPT Model
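This card only names the model; a minimal single-turn usage sketch, following the standard DialoGPT chat recipe (the prompt string is a placeholder):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Denny29/DialoGPT-medium-asunayuuki")
model = AutoModelForCausalLM.from_pretrained("Denny29/DialoGPT-medium-asunayuuki")

# encode a user turn, append the end-of-sequence token, and generate a reply
input_ids = tokenizer.encode("Hi, who are you?" + tokenizer.eos_token, return_tensors="pt")
reply_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)

# decode only the newly generated tokens
print(tokenizer.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```
|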
DeskDown/MarianMixFT_en-fil | 00578087e074776a0c3f59ad3ccb6df5a90423c7 | 2022-01-14T21:38:12.000Z | [
"pytorch",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DeskDown | null | DeskDown/MarianMixFT_en-fil | 1 | null | transformers | 27,915 | Entry not found |
DeskDown/MarianMixFT_en-id | 6ebc0bfd1152413b615d93b419ab84ddaf53e260 | 2022-01-14T22:28:11.000Z | [
"pytorch",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DeskDown | null | DeskDown/MarianMixFT_en-id | 1 | null | transformers | 27,916 | Entry not found |
DeskDown/MarianMixFT_en-my | d3f4dff02d7c993e4004b2c192958858a2e8f229 | 2022-01-14T21:02:20.000Z | [
"pytorch",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DeskDown | null | DeskDown/MarianMixFT_en-my | 1 | null | transformers | 27,917 | Entry not found |
DeskDown/MarianMix_en-ja-10 | e566fecb1bb3629347abc830e3f40e0957fcab10 | 2022-02-09T00:27:05.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DeskDown | null | DeskDown/MarianMix_en-ja-10 | 1 | null | transformers | 27,918 | Entry not found |
DeskDown/MarianMix_en-zh-10 | bd264264c3458c09a716c848cb767fdd3160ffa6 | 2022-01-13T23:56:21.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | DeskDown | null | DeskDown/MarianMix_en-zh-10 | 1 | null | transformers | 27,919 | Entry not found |
DicoTiar/wisdomfiy | 59491038e914d02d8e887829904dadaa2636e228 | 2021-09-22T07:07:53.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | DicoTiar | null | DicoTiar/wisdomfiy | 1 | null | transformers | 27,920 | Entry not found |
Dimedrolza/DialoGPT-small-cyberpunk | 924411b46fe0ddbdc1f94d902607e1961af07311 | 2021-08-29T05:07:53.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Dimedrolza | null | Dimedrolza/DialoGPT-small-cyberpunk | 1 | null | transformers | 27,921 | ---
tags:
- conversational
---
# V DialoGPT Model |
Doiman/DialoGPT-medium-harrypotter | f20ada5e54683fa510125c35b17004e590673287 | 2021-09-03T09:40:14.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Doiman | null | Doiman/DialoGPT-medium-harrypotter | 1 | null | transformers | 27,922 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Medium Model |
Doogie/Wayne_summary_ENG | 8fe9fe95161540f70c14f6a64fa92923e6b61752 | 2022-01-18T05:12:00.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Doogie | null | Doogie/Wayne_summary_ENG | 1 | null | transformers | 27,923 | Entry not found |
Doohae/p_encoder | d2a2d81ea1a9330ef57afb30aaa6617ce3b13cef | 2022-02-09T17:24:29.000Z | [
"pytorch"
] | null | false | Doohae | null | Doohae/p_encoder | 1 | null | null | 27,924 | Entry not found |
Doohae/q_encoder | f076ce024f921295b48d7822291aed7f3f33d825 | 2022-02-09T17:30:34.000Z | [
"pytorch"
] | null | false | Doohae | null | Doohae/q_encoder | 1 | null | null | 27,925 | Entry not found |
Dreyzin/DialoGPT-medium-avatar | 17e49735c4f28d09b15e19b9512d850c08d705e5 | 2022-01-19T04:49:52.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Dreyzin | null | Dreyzin/DialoGPT-medium-avatar | 1 | null | transformers | 27,926 | ---
tags:
- conversational
---
# Uncle Iroh DialoGPT Model |
DrishtiSharma/wav2vec2-large-xls-r-300m-ab-CV7 | 24823294659ff2641ef85521d5485079a1949e23 | 2022-03-24T11:54:32.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"ab",
"dataset:mozilla-foundation/common_voice_7_0",
"transformers",
"mozilla-foundation/common_voice_7_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-ab-CV7 | 1 | null | transformers | 27,927 | ---
language:
- ab
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_7_0
- generated_from_trainer
- ab
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_7_0
model-index:
- name: wav2vec2-large-xls-r-300m-ab-CV7
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 7
type: mozilla-foundation/common_voice_7_0
args: ab
metrics:
- name: Test WER
type: wer
value: 0.5291160452450775
- name: Test CER
type: cer
value: 0.10630270750110964
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: ab
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-ab-CV7
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - AB dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5620
- Wer: 0.5651
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_7_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-ab-CV7 --dataset mozilla-foundation/common_voice_7_0 --config ab --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
NA
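For a quick local check (as opposed to the eval.py script above), a minimal inference sketch with the `transformers` ASR pipeline; `sample_ab.wav` is a placeholder for any 16 kHz mono recording:
```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="DrishtiSharma/wav2vec2-large-xls-r-300m-ab-CV7",
)

# transcribe a local audio file; the pipeline decodes it at the model's
# expected 16 kHz sampling rate
print(asr("sample_ab.wav")["text"])
```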
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7.5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2000
- num_epochs: 100.0
- mixed_precision_training: Native AMP
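The card does not ship the training script itself; as a rough illustration, the values above might map onto `transformers.TrainingArguments` like this (`output_dir` is hypothetical):
```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="wav2vec2-large-xls-r-300m-ab-CV7",  # hypothetical
    learning_rate=7.5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    gradient_accumulation_steps=2,  # 16 x 2 = 32 total train batch size
    lr_scheduler_type="linear",
    warmup_steps=2000,
    num_train_epochs=100.0,
    fp16=True,  # "Native AMP" mixed precision
)
```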
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 9.6445 | 13.64 | 300 | 4.3963 | 1.0 |
| 3.6459 | 27.27 | 600 | 3.2267 | 1.0 |
| 3.0978 | 40.91 | 900 | 3.0927 | 1.0 |
| 2.8357 | 54.55 | 1200 | 2.1462 | 1.0029 |
| 1.2723 | 68.18 | 1500 | 0.6747 | 0.6996 |
| 0.6528 | 81.82 | 1800 | 0.5928 | 0.6422 |
| 0.4905 | 95.45 | 2100 | 0.5587 | 0.5681 |
### Framework versions
- Transformers 4.16.0.dev0
- Pytorch 1.10.1+cu102
- Datasets 1.17.1.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-ab-v4 | 18917b47be1338cdcba9af336df83f54ae148345 | 2022-01-26T01:35:38.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"ab",
"dataset:common_voice",
"transformers",
"mozilla-foundation/common_voice_7_0",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-ab-v4 | 1 | null | transformers | 27,928 | ---
language:
- ab
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_7_0
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: ''
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-ab-v4
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - AB dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6178
- Wer: 0.5794
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.00025
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 70.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 5.2793 | 27.27 | 300 | 3.0737 | 1.0 |
| 1.5348 | 54.55 | 600 | 0.6312 | 0.6334 |
### Framework versions
- Transformers 4.16.0.dev0
- Pytorch 1.10.1+cu102
- Datasets 1.17.1.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-as-g1 | ba766d971438f8288e726cff56d640c44d610509 | 2022-03-24T11:56:37.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"as",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-as-g1 | 1 | null | transformers | 27,929 | ---
language:
- as
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- as
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-as-g1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: as
metrics:
- name: Test WER
type: wer
value: 0.6540934419202743
- name: Test CER
type: cer
value: 0.21454042646095625
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: as
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-as-g1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - AS dataset.
It achieves the following results on the evaluation set:
- Loss: 1.3327
- Wer: 0.5744
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-as-g1 --dataset mozilla-foundation/common_voice_8_0 --config as --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Assamese language isn't available in speech-recognition-community-v2/dev_data
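The Test WER and CER reported above are the standard word- and character-level error rates; a small sketch of how such figures are computed with the `jiwer` package (toy strings, not actual model output):
```python
import jiwer

references = ["the quick brown fox"]    # ground-truth transcripts (toy)
predictions = ["the quick browne fox"]  # hypothetical model outputs

print("WER:", jiwer.wer(references, predictions))
print("CER:", jiwer.cer(references, predictions))
```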
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 14.1958 | 5.26 | 100 | 7.1919 | 1.0 |
| 5.0035 | 10.51 | 200 | 3.9362 | 1.0 |
| 3.6193 | 15.77 | 300 | 3.4451 | 1.0 |
| 3.4852 | 21.05 | 400 | 3.3536 | 1.0 |
| 2.8489 | 26.31 | 500 | 1.6451 | 0.9100 |
| 0.9568 | 31.56 | 600 | 1.0514 | 0.7561 |
| 0.4865 | 36.82 | 700 | 1.0434 | 0.7184 |
| 0.322 | 42.1 | 800 | 1.0825 | 0.7210 |
| 0.2383 | 47.36 | 900 | 1.1304 | 0.6897 |
| 0.2136 | 52.62 | 1000 | 1.1150 | 0.6854 |
| 0.179 | 57.87 | 1100 | 1.2453 | 0.6875 |
| 0.1539 | 63.15 | 1200 | 1.2211 | 0.6704 |
| 0.1303 | 68.41 | 1300 | 1.2859 | 0.6747 |
| 0.1183 | 73.67 | 1400 | 1.2775 | 0.6721 |
| 0.0994 | 78.92 | 1500 | 1.2321 | 0.6404 |
| 0.0991 | 84.21 | 1600 | 1.2766 | 0.6524 |
| 0.0887 | 89.46 | 1700 | 1.3026 | 0.6344 |
| 0.0754 | 94.72 | 1800 | 1.3199 | 0.6704 |
| 0.0693 | 99.97 | 1900 | 1.3044 | 0.6361 |
| 0.0568 | 105.26 | 2000 | 1.3541 | 0.6254 |
| 0.0536 | 110.51 | 2100 | 1.3320 | 0.6249 |
| 0.0529 | 115.77 | 2200 | 1.3370 | 0.6271 |
| 0.048 | 121.05 | 2300 | 1.2757 | 0.6031 |
| 0.0419 | 126.31 | 2400 | 1.2661 | 0.6172 |
| 0.0349 | 131.56 | 2500 | 1.2897 | 0.6048 |
| 0.0309 | 136.82 | 2600 | 1.2688 | 0.5962 |
| 0.0278 | 142.1 | 2700 | 1.2885 | 0.5954 |
| 0.0254 | 147.36 | 2800 | 1.2988 | 0.5915 |
| 0.0223 | 152.62 | 2900 | 1.3153 | 0.5941 |
| 0.0216 | 157.87 | 3000 | 1.2936 | 0.5937 |
| 0.0186 | 163.15 | 3100 | 1.2906 | 0.5877 |
| 0.0156 | 168.41 | 3200 | 1.3476 | 0.5962 |
| 0.0158 | 173.67 | 3300 | 1.3363 | 0.5847 |
| 0.0142 | 178.92 | 3400 | 1.3367 | 0.5847 |
| 0.0153 | 184.21 | 3500 | 1.3105 | 0.5757 |
| 0.0119 | 189.46 | 3600 | 1.3255 | 0.5705 |
| 0.0115 | 194.72 | 3700 | 1.3340 | 0.5787 |
| 0.0103 | 199.97 | 3800 | 1.3327 | 0.5744 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-as-v9 | 7528574eb82e6e9feac9657cd9f404e97302d016 | 2022-03-24T11:54:35.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"as",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-as-v9 | 1 | null | transformers | 27,930 | ---
language:
- as
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- as
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-as-v9
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: as
metrics:
- name: Test WER
type: wer
value: 0.6163737676810973
- name: Test CER
type: cer
value: 0.19496397642093005
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: as
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8.0
type: mozilla-foundation/common_voice_8_0
args: as
metrics:
- name: Test WER
type: wer
value: 61.64
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-as-v9
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1679
- Wer: 0.5761
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-as-v9 --dataset mozilla-foundation/common_voice_8_0 --config as --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Assamese (as) language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.000111
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 300
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 8.3852 | 10.51 | 200 | 3.6402 | 1.0 |
| 3.5374 | 21.05 | 400 | 3.3894 | 1.0 |
| 2.8645 | 31.56 | 600 | 1.3143 | 0.8303 |
| 1.1784 | 42.1 | 800 | 0.9417 | 0.6661 |
| 0.7805 | 52.62 | 1000 | 0.9292 | 0.6237 |
| 0.5973 | 63.15 | 1200 | 0.9489 | 0.6014 |
| 0.4784 | 73.67 | 1400 | 0.9916 | 0.5962 |
| 0.4138 | 84.21 | 1600 | 1.0272 | 0.6121 |
| 0.3491 | 94.72 | 1800 | 1.0412 | 0.5984 |
| 0.3062 | 105.26 | 2000 | 1.0769 | 0.6005 |
| 0.2707 | 115.77 | 2200 | 1.0708 | 0.5752 |
| 0.2459 | 126.31 | 2400 | 1.1285 | 0.6009 |
| 0.2234 | 136.82 | 2600 | 1.1209 | 0.5949 |
| 0.2035 | 147.36 | 2800 | 1.1348 | 0.5842 |
| 0.1876 | 157.87 | 3000 | 1.1480 | 0.5872 |
| 0.1669 | 168.41 | 3200 | 1.1496 | 0.5838 |
| 0.1595 | 178.92 | 3400 | 1.1721 | 0.5778 |
| 0.1505 | 189.46 | 3600 | 1.1654 | 0.5744 |
| 0.1486 | 199.97 | 3800 | 1.1679 | 0.5761 |
### Framework versions
- Transformers 4.16.1
- Pytorch 1.10.0+cu111
- Datasets 1.18.2
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-bas-v1 | c925e00d18e21d7bc2607b4befa7e042545f6095 | 2022-03-24T11:56:40.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"bas",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-bas-v1 | 1 | null | transformers | 27,931 | ---
language:
- bas
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- bas
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-bas-v1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: bas
metrics:
- name: Test WER
type: wer
value: 0.3566497929130234
- name: Test CER
type: cer
value: 0.1102657634184471
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: bas
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-bas-v1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - BAS dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5997
- Wer: 0.3870
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-bas-v1 --dataset mozilla-foundation/common_voice_8_0 --config bas --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Basaa (bas) language isn't available in speech-recognition-community-v2/dev_data
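Besides the eval.py script, inference can also be sketched at a lower level with the processor and CTC head directly; the silent waveform below is a stand-in for real 16 kHz mono audio:
```python
import numpy as np
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "DrishtiSharma/wav2vec2-large-xls-r-300m-bas-v1"
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

speech = np.zeros(16_000, dtype=np.float32)  # 1 s of silence as a stand-in waveform
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# greedy CTC decoding
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))
```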
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.000111
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 100
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 12.7076 | 5.26 | 200 | 3.6361 | 1.0 |
| 3.1657 | 10.52 | 400 | 3.0101 | 1.0 |
| 2.3987 | 15.78 | 600 | 0.9125 | 0.6774 |
| 1.0079 | 21.05 | 800 | 0.6477 | 0.5352 |
| 0.7392 | 26.31 | 1000 | 0.5432 | 0.4929 |
| 0.6114 | 31.57 | 1200 | 0.5498 | 0.4639 |
| 0.5222 | 36.83 | 1400 | 0.5220 | 0.4561 |
| 0.4648 | 42.1 | 1600 | 0.5586 | 0.4289 |
| 0.4103 | 47.36 | 1800 | 0.5337 | 0.4082 |
| 0.3692 | 52.62 | 2000 | 0.5421 | 0.3861 |
| 0.3403 | 57.88 | 2200 | 0.5549 | 0.4096 |
| 0.3011 | 63.16 | 2400 | 0.5833 | 0.3925 |
| 0.2932 | 68.42 | 2600 | 0.5674 | 0.3815 |
| 0.2696 | 73.68 | 2800 | 0.5734 | 0.3889 |
| 0.2496 | 78.94 | 3000 | 0.5968 | 0.3985 |
| 0.2289 | 84.21 | 3200 | 0.5888 | 0.3893 |
| 0.2091 | 89.47 | 3400 | 0.5849 | 0.3852 |
| 0.2005 | 94.73 | 3600 | 0.5938 | 0.3875 |
| 0.1876 | 99.99 | 3800 | 0.5997 | 0.3870 |
### Framework versions
- Transformers 4.16.1
- Pytorch 1.10.0+cu111
- Datasets 1.18.2
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-br-d2 | 1323a1736bc7b0db2c085ad0cde9d3a204a3d0b3 | 2022-03-24T11:54:37.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"br",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"generated_from_trainer",
"robust-speech-event",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-br-d2 | 1 | null | transformers | 27,932 | ---
language:
- br
license: apache-2.0
tags:
- generated_from_trainer
- robust-speech-event
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
metrics:
- wer
model-index:
- name: wav2vec2-large-xls-r-300m-br-d2
results:
- task:
type: automatic-speech-recognition
name: Speech Recognition
dataset:
type: mozilla-foundation/common_voice_8_0
name: Common Voice 8
args: br
metrics:
- type: wer
value: 0.49770598355954887
name: Test WER
- name: Test CER
type: cer
value: 0.18090500890299605
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: br
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-br-d2
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - BR dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1257
- Wer: 0.4631
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-br-d2 --dataset mozilla-foundation/common_voice_8_0 --config br --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Breton language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.00034
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 750
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 14.0379 | 0.68 | 100 | 5.6808 | 1.0 |
| 3.9145 | 1.35 | 200 | 3.1970 | 1.0 |
| 3.0293 | 2.03 | 300 | 2.9513 | 1.0 |
| 2.0927 | 2.7 | 400 | 1.4545 | 0.8887 |
| 1.1556 | 3.38 | 500 | 1.0966 | 0.7564 |
| 0.9628 | 4.05 | 600 | 0.9808 | 0.7364 |
| 0.7869 | 4.73 | 700 | 1.0488 | 0.7355 |
| 0.703 | 5.41 | 800 | 0.9500 | 0.6881 |
| 0.6657 | 6.08 | 900 | 0.9309 | 0.6259 |
| 0.5663 | 6.76 | 1000 | 0.9133 | 0.6357 |
| 0.496 | 7.43 | 1100 | 0.9890 | 0.6028 |
| 0.4748 | 8.11 | 1200 | 0.9469 | 0.5894 |
| 0.4135 | 8.78 | 1300 | 0.9270 | 0.6045 |
| 0.3579 | 9.46 | 1400 | 0.8818 | 0.5708 |
| 0.353 | 10.14 | 1500 | 0.9244 | 0.5781 |
| 0.334 | 10.81 | 1600 | 0.9009 | 0.5638 |
| 0.2917 | 11.49 | 1700 | 1.0132 | 0.5828 |
| 0.29 | 12.16 | 1800 | 0.9696 | 0.5668 |
| 0.2691 | 12.84 | 1900 | 0.9811 | 0.5455 |
| 0.25 | 13.51 | 2000 | 0.9951 | 0.5624 |
| 0.2467 | 14.19 | 2100 | 0.9653 | 0.5573 |
| 0.2242 | 14.86 | 2200 | 0.9714 | 0.5378 |
| 0.2066 | 15.54 | 2300 | 0.9829 | 0.5394 |
| 0.2075 | 16.22 | 2400 | 1.0547 | 0.5520 |
| 0.1923 | 16.89 | 2500 | 1.0014 | 0.5397 |
| 0.1919 | 17.57 | 2600 | 0.9978 | 0.5477 |
| 0.1908 | 18.24 | 2700 | 1.1064 | 0.5397 |
| 0.157 | 18.92 | 2800 | 1.0629 | 0.5238 |
| 0.159 | 19.59 | 2900 | 1.0642 | 0.5321 |
| 0.1652 | 20.27 | 3000 | 1.0207 | 0.5328 |
| 0.141 | 20.95 | 3100 | 0.9948 | 0.5312 |
| 0.1417 | 21.62 | 3200 | 1.0338 | 0.5328 |
| 0.1514 | 22.3 | 3300 | 1.0513 | 0.5313 |
| 0.1365 | 22.97 | 3400 | 1.0357 | 0.5291 |
| 0.1319 | 23.65 | 3500 | 1.0587 | 0.5167 |
| 0.1298 | 24.32 | 3600 | 1.0636 | 0.5236 |
| 0.1245 | 25.0 | 3700 | 1.1367 | 0.5280 |
| 0.1114 | 25.68 | 3800 | 1.0633 | 0.5200 |
| 0.1088 | 26.35 | 3900 | 1.0495 | 0.5210 |
| 0.1175 | 27.03 | 4000 | 1.0897 | 0.5095 |
| 0.1043 | 27.7 | 4100 | 1.0580 | 0.5309 |
| 0.0951 | 28.38 | 4200 | 1.0448 | 0.5067 |
| 0.1011 | 29.05 | 4300 | 1.0665 | 0.5137 |
| 0.0889 | 29.73 | 4400 | 1.0579 | 0.5026 |
| 0.0833 | 30.41 | 4500 | 1.0740 | 0.5037 |
| 0.0889 | 31.08 | 4600 | 1.0933 | 0.5083 |
| 0.0784 | 31.76 | 4700 | 1.0715 | 0.5089 |
| 0.0767 | 32.43 | 4800 | 1.0658 | 0.5049 |
| 0.0769 | 33.11 | 4900 | 1.1118 | 0.4979 |
| 0.0722 | 33.78 | 5000 | 1.1413 | 0.4986 |
| 0.0709 | 34.46 | 5100 | 1.0706 | 0.4885 |
| 0.0664 | 35.14 | 5200 | 1.1217 | 0.4884 |
| 0.0648 | 35.81 | 5300 | 1.1298 | 0.4941 |
| 0.0657 | 36.49 | 5400 | 1.1330 | 0.4920 |
| 0.0582 | 37.16 | 5500 | 1.0598 | 0.4835 |
| 0.0602 | 37.84 | 5600 | 1.1097 | 0.4943 |
| 0.0598 | 38.51 | 5700 | 1.0976 | 0.4876 |
| 0.0547 | 39.19 | 5800 | 1.0734 | 0.4825 |
| 0.0561 | 39.86 | 5900 | 1.0926 | 0.4850 |
| 0.0516 | 40.54 | 6000 | 1.1579 | 0.4751 |
| 0.0478 | 41.22 | 6100 | 1.1384 | 0.4706 |
| 0.0396 | 41.89 | 6200 | 1.1462 | 0.4739 |
| 0.0472 | 42.57 | 6300 | 1.1277 | 0.4732 |
| 0.0447 | 43.24 | 6400 | 1.1517 | 0.4752 |
| 0.0423 | 43.92 | 6500 | 1.1219 | 0.4784 |
| 0.0426 | 44.59 | 6600 | 1.1311 | 0.4724 |
| 0.0391 | 45.27 | 6700 | 1.1135 | 0.4692 |
| 0.0362 | 45.95 | 6800 | 1.0878 | 0.4645 |
| 0.0329 | 46.62 | 6900 | 1.1137 | 0.4668 |
| 0.0356 | 47.3 | 7000 | 1.1233 | 0.4687 |
| 0.0328 | 47.97 | 7100 | 1.1238 | 0.4653 |
| 0.0323 | 48.65 | 7200 | 1.1307 | 0.4646 |
| 0.0325 | 49.32 | 7300 | 1.1242 | 0.4645 |
| 0.03 | 50.0 | 7400 | 1.1257 | 0.4631 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-gn-k1 | 7b04c0cb51436f317941750fb19c50a9f9b97d32 | 2022-03-24T11:52:47.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"gn",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-gn-k1 | 1 | null | transformers | 27,933 | ---
language:
- gn
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- gn
- robust-speech-event
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-gn-k1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: gn
metrics:
- name: Test WER
type: wer
value: 0.711890243902439
- name: Test CER
type: cer
value: 0.13311897106109324
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: gn
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-gn-k1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - GN dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9220
- Wer: 0.6631
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-gn-k1 --dataset mozilla-foundation/common_voice_8_0 --config gn --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
NA
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.00018
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 600
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 15.9402 | 8.32 | 100 | 6.9185 | 1.0 |
| 4.6367 | 16.64 | 200 | 3.7416 | 1.0 |
| 3.4337 | 24.96 | 300 | 3.2581 | 1.0 |
| 3.2307 | 33.32 | 400 | 2.8008 | 1.0 |
| 1.3182 | 41.64 | 500 | 0.8359 | 0.8171 |
| 0.409 | 49.96 | 600 | 0.8470 | 0.8323 |
| 0.2573 | 58.32 | 700 | 0.7823 | 0.7576 |
| 0.1969 | 66.64 | 800 | 0.8306 | 0.7424 |
| 0.1469 | 74.96 | 900 | 0.9225 | 0.7713 |
| 0.1172 | 83.32 | 1000 | 0.7903 | 0.6951 |
| 0.1017 | 91.64 | 1100 | 0.8519 | 0.6921 |
| 0.0851 | 99.96 | 1200 | 0.8129 | 0.6646 |
| 0.071 | 108.32 | 1300 | 0.8614 | 0.7043 |
| 0.061 | 116.64 | 1400 | 0.8414 | 0.6921 |
| 0.0552 | 124.96 | 1500 | 0.8649 | 0.6905 |
| 0.0465 | 133.32 | 1600 | 0.8575 | 0.6646 |
| 0.0381 | 141.64 | 1700 | 0.8802 | 0.6723 |
| 0.0338 | 149.96 | 1800 | 0.8731 | 0.6845 |
| 0.0306 | 158.32 | 1900 | 0.9003 | 0.6585 |
| 0.0236 | 166.64 | 2000 | 0.9408 | 0.6616 |
| 0.021 | 174.96 | 2100 | 0.9353 | 0.6723 |
| 0.0212 | 183.32 | 2200 | 0.9269 | 0.6570 |
| 0.0191 | 191.64 | 2300 | 0.9277 | 0.6662 |
| 0.0161 | 199.96 | 2400 | 0.9220 | 0.6631 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-hi-cv8-b2 | 681a7e9001161918e720cfa4717b7d3e2dafe307 | 2022-03-24T11:52:52.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"hi",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"robust-speech-event",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-hi-cv8-b2 | 1 | null | transformers | 27,934 | ---
language:
- hi
license: apache-2.0
tags:
- automatic-speech-recognition
- robust-speech-event
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
metrics:
- wer
model-index:
- name: wav2vec2-large-xls-r-300m-hi-cv8-b2
results:
- task:
type: automatic-speech-recognition
name: Speech Recognition
dataset:
type: mozilla-foundation/common_voice_8_0
name: Common Voice 8
args: hi
metrics:
- type: wer
value: 0.3891350503092403
name: Test WER
- name: Test CER
type: cer
value: 0.13016327327131985
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: hi
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-hi-cv8-b2
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - HI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7322
- Wer: 0.3469
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-hi-cv8-b2 --dataset mozilla-foundation/common_voice_8_0 --config hi --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Hindi language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.00025
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 700
- num_epochs: 35
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 9.6226 | 1.04 | 200 | 3.8855 | 1.0 |
| 3.4678 | 2.07 | 400 | 3.4283 | 1.0 |
| 2.3668 | 3.11 | 600 | 1.0743 | 0.7175 |
| 0.7308 | 4.15 | 800 | 0.7663 | 0.5498 |
| 0.4985 | 5.18 | 1000 | 0.6957 | 0.5001 |
| 0.3817 | 6.22 | 1200 | 0.6932 | 0.4866 |
| 0.3281 | 7.25 | 1400 | 0.7034 | 0.4983 |
| 0.2752 | 8.29 | 1600 | 0.6588 | 0.4606 |
| 0.2475 | 9.33 | 1800 | 0.6514 | 0.4328 |
| 0.219 | 10.36 | 2000 | 0.6396 | 0.4176 |
| 0.2036 | 11.4 | 2200 | 0.6867 | 0.4162 |
| 0.1793 | 12.44 | 2400 | 0.6943 | 0.4196 |
| 0.1724 | 13.47 | 2600 | 0.6862 | 0.4260 |
| 0.1554 | 14.51 | 2800 | 0.7615 | 0.4222 |
| 0.151 | 15.54 | 3000 | 0.7058 | 0.4110 |
| 0.1335 | 16.58 | 3200 | 0.7172 | 0.3986 |
| 0.1326 | 17.62 | 3400 | 0.7182 | 0.3923 |
| 0.1225 | 18.65 | 3600 | 0.6995 | 0.3910 |
| 0.1146 | 19.69 | 3800 | 0.7075 | 0.3875 |
| 0.108 | 20.73 | 4000 | 0.7297 | 0.3858 |
| 0.1048 | 21.76 | 4200 | 0.7413 | 0.3850 |
| 0.0979 | 22.8 | 4400 | 0.7452 | 0.3793 |
| 0.0946 | 23.83 | 4600 | 0.7436 | 0.3759 |
| 0.0897 | 24.87 | 4800 | 0.7289 | 0.3754 |
| 0.0854 | 25.91 | 5000 | 0.7271 | 0.3667 |
| 0.0803 | 26.94 | 5200 | 0.7378 | 0.3656 |
| 0.0752 | 27.98 | 5400 | 0.7488 | 0.3680 |
| 0.0718 | 29.02 | 5600 | 0.7185 | 0.3619 |
| 0.0702 | 30.05 | 5800 | 0.7428 | 0.3554 |
| 0.0653 | 31.09 | 6000 | 0.7447 | 0.3559 |
| 0.0638 | 32.12 | 6200 | 0.7327 | 0.3523 |
| 0.058 | 33.16 | 6400 | 0.7339 | 0.3488 |
| 0.0594 | 34.2 | 6600 | 0.7322 | 0.3469 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-hi-cv8 | e9b231b9ebf74ad7c6fdaa76a6645a08d1bb11d2 | 2022-03-24T11:54:40.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"hi",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-hi-cv8 | 1 | null | transformers | 27,935 | ---
language:
- hi
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- hi
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-hi-cv8
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: hi
metrics:
- name: Test WER
type: wer
value: 0.3628727037755008
- name: Test CER
type: cer
value: 0.11933724247521164
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: hi
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-hi-cv8
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - HI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6510
- Wer: 0.3179
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-hi-cv8 --dataset mozilla-foundation/common_voice_8_0 --config hi --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-hi-cv8 --dataset speech-recognition-community-v2/dev_data --config hi --split validation --chunk_length_s 10 --stride_length_s 1
Note: Hindi language not found in speech-recognition-community-v2/dev_data
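The dev_data command above passes --chunk_length_s 10 --stride_length_s 1; the same chunked decoding for long recordings can be sketched through the pipeline API (the file name is a placeholder):
```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="DrishtiSharma/wav2vec2-large-xls-r-300m-hi-cv8",
)

# split long audio into 10 s chunks with 1 s of stride context on each side,
# mirroring the eval command's --chunk_length_s / --stride_length_s flags
result = asr("long_recording.wav", chunk_length_s=10, stride_length_s=1)
print(result["text"])
```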
### Training hyperparameters
The following hyperparameters were used during training:
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2000
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 12.5576 | 1.04 | 200 | 6.6594 | 1.0 |
| 4.4069 | 2.07 | 400 | 3.6011 | 1.0 |
| 3.4273 | 3.11 | 600 | 3.3370 | 1.0 |
| 2.1108 | 4.15 | 800 | 1.0641 | 0.6562 |
| 0.8817 | 5.18 | 1000 | 0.7178 | 0.5172 |
| 0.6508 | 6.22 | 1200 | 0.6612 | 0.4839 |
| 0.5524 | 7.25 | 1400 | 0.6458 | 0.4889 |
| 0.4992 | 8.29 | 1600 | 0.5791 | 0.4382 |
| 0.4669 | 9.33 | 1800 | 0.6039 | 0.4352 |
| 0.4441 | 10.36 | 2000 | 0.6276 | 0.4297 |
| 0.4172 | 11.4 | 2200 | 0.6183 | 0.4474 |
| 0.3872 | 12.44 | 2400 | 0.5886 | 0.4231 |
| 0.3692 | 13.47 | 2600 | 0.6448 | 0.4399 |
| 0.3385 | 14.51 | 2800 | 0.6344 | 0.4075 |
| 0.3246 | 15.54 | 3000 | 0.5896 | 0.4087 |
| 0.3026 | 16.58 | 3200 | 0.6158 | 0.4016 |
| 0.284 | 17.62 | 3400 | 0.6038 | 0.3906 |
| 0.2682 | 18.65 | 3600 | 0.6165 | 0.3900 |
| 0.2577 | 19.69 | 3800 | 0.5754 | 0.3805 |
| 0.2509 | 20.73 | 4000 | 0.6028 | 0.3925 |
| 0.2426 | 21.76 | 4200 | 0.6335 | 0.4138 |
| 0.2346 | 22.8 | 4400 | 0.6128 | 0.3870 |
| 0.2205 | 23.83 | 4600 | 0.6223 | 0.3831 |
| 0.2104 | 24.87 | 4800 | 0.6122 | 0.3781 |
| 0.1992 | 25.91 | 5000 | 0.6467 | 0.3792 |
| 0.1916 | 26.94 | 5200 | 0.6277 | 0.3636 |
| 0.1835 | 27.98 | 5400 | 0.6317 | 0.3773 |
| 0.1776 | 29.02 | 5600 | 0.6124 | 0.3614 |
| 0.1751 | 30.05 | 5800 | 0.6475 | 0.3628 |
| 0.1662 | 31.09 | 6000 | 0.6266 | 0.3504 |
| 0.1584 | 32.12 | 6200 | 0.6347 | 0.3532 |
| 0.1494 | 33.16 | 6400 | 0.6636 | 0.3491 |
| 0.1457 | 34.2 | 6600 | 0.6334 | 0.3507 |
| 0.1427 | 35.23 | 6800 | 0.6397 | 0.3442 |
| 0.1397 | 36.27 | 7000 | 0.6468 | 0.3496 |
| 0.1283 | 37.31 | 7200 | 0.6291 | 0.3416 |
| 0.1255 | 38.34 | 7400 | 0.6652 | 0.3461 |
| 0.1195 | 39.38 | 7600 | 0.6587 | 0.3342 |
| 0.1169 | 40.41 | 7800 | 0.6478 | 0.3319 |
| 0.1126 | 41.45 | 8000 | 0.6280 | 0.3291 |
| 0.1112 | 42.49 | 8200 | 0.6434 | 0.3290 |
| 0.1069 | 43.52 | 8400 | 0.6542 | 0.3268 |
| 0.1027 | 44.56 | 8600 | 0.6536 | 0.3239 |
| 0.0993 | 45.6 | 8800 | 0.6622 | 0.3257 |
| 0.0973 | 46.63 | 9000 | 0.6572 | 0.3192 |
| 0.0911 | 47.67 | 9200 | 0.6522 | 0.3175 |
| 0.0897 | 48.7 | 9400 | 0.6521 | 0.3200 |
| 0.0905 | 49.74 | 9600 | 0.6510 | 0.3179 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-hi-wx1 | bbd60cc6db8fd643bb3e6509f1719db2ebcddaf5 | 2022-03-23T18:35:14.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"hi",
"dataset:mozilla-foundation/common_voice_7_0",
"transformers",
"hf-asr-leaderboard",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-hi-wx1 | 1 | null | transformers | 27,936 | ---
language:
- hi
license: apache-2.0
tags:
- automatic-speech-recognition
- hf-asr-leaderboard
- robust-speech-event
datasets:
- mozilla-foundation/common_voice_7_0
metrics:
- wer
model-index:
- name: wav2vec2-large-xls-r-300m-hi-wx1
results:
- task:
type: automatic-speech-recognition
name: Speech Recognition
dataset:
type: mozilla-foundation/common_voice_7_0
name: Common Voice 7
args: hi
metrics:
- type: wer
value: 0.3719684845500431
name: Test WER
- name: Test CER
type: cer
value: 0.11763235514672798
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-hi-wx1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - HI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6552
- Wer: 0.3200
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_7_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-hi-wx1 --dataset mozilla-foundation/common_voice_7_0 --config hi --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
NA
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.00024
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1800
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 12.2663 | 1.36 | 200 | 5.9245 | 1.0 |
| 4.1856 | 2.72 | 400 | 3.4968 | 1.0 |
| 3.3908 | 4.08 | 600 | 2.9970 | 1.0 |
| 1.5444 | 5.44 | 800 | 0.9071 | 0.6139 |
| 0.7237 | 6.8 | 1000 | 0.6508 | 0.4862 |
| 0.5323 | 8.16 | 1200 | 0.6217 | 0.4647 |
| 0.4426 | 9.52 | 1400 | 0.5785 | 0.4288 |
| 0.3933 | 10.88 | 1600 | 0.5935 | 0.4217 |
| 0.3532 | 12.24 | 1800 | 0.6358 | 0.4465 |
| 0.3319 | 13.6 | 2000 | 0.5789 | 0.4118 |
| 0.2877 | 14.96 | 2200 | 0.6163 | 0.4056 |
| 0.2663 | 16.33 | 2400 | 0.6176 | 0.3893 |
| 0.2511 | 17.68 | 2600 | 0.6065 | 0.3999 |
| 0.2275 | 19.05 | 2800 | 0.6183 | 0.3842 |
| 0.2098 | 20.41 | 3000 | 0.6486 | 0.3864 |
| 0.1943 | 21.77 | 3200 | 0.6365 | 0.3885 |
| 0.1877 | 23.13 | 3400 | 0.6013 | 0.3677 |
| 0.1679 | 24.49 | 3600 | 0.6451 | 0.3795 |
| 0.1667 | 25.85 | 3800 | 0.6410 | 0.3635 |
| 0.1514 | 27.21 | 4000 | 0.6000 | 0.3577 |
| 0.1453 | 28.57 | 4200 | 0.6020 | 0.3518 |
| 0.134 | 29.93 | 4400 | 0.6531 | 0.3517 |
| 0.1354 | 31.29 | 4600 | 0.6874 | 0.3578 |
| 0.1224 | 32.65 | 4800 | 0.6519 | 0.3492 |
| 0.1199 | 34.01 | 5000 | 0.6553 | 0.3490 |
| 0.1077 | 35.37 | 5200 | 0.6621 | 0.3429 |
| 0.0997 | 36.73 | 5400 | 0.6641 | 0.3413 |
| 0.0964 | 38.09 | 5600 | 0.6722 | 0.3385 |
| 0.0931 | 39.45 | 5800 | 0.6365 | 0.3363 |
| 0.0944 | 40.81 | 6000 | 0.6454 | 0.3326 |
| 0.0862 | 42.18 | 6200 | 0.6497 | 0.3256 |
| 0.0848 | 43.54 | 6400 | 0.6599 | 0.3226 |
| 0.0793 | 44.89 | 6600 | 0.6625 | 0.3232 |
| 0.076 | 46.26 | 6800 | 0.6463 | 0.3186 |
| 0.0749 | 47.62 | 7000 | 0.6559 | 0.3225 |
| 0.0663 | 48.98 | 7200 | 0.6552 | 0.3200 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-maltese | e29d63b9690d3080c3d9f8e6b6a51f6849a49b73 | 2022-03-23T18:35:17.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"mt",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"generated_from_trainer",
"hf-asr-leaderboard",
"model_for_talk",
"mozilla-foundation/common_voice_8_0",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-maltese | 1 | null | transformers | 27,937 | ---
language:
- mt
license: apache-2.0
tags:
- automatic-speech-recognition
- generated_from_trainer
- hf-asr-leaderboard
- model_for_talk
- mozilla-foundation/common_voice_8_0
- mt
- robust-speech-event
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-maltese
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: mt
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-maltese
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - MT dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2994
- Wer: 0.2781
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1800
- num_epochs: 100.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 3.0174 | 9.01 | 1000 | 3.0552 | 1.0 |
| 1.0446 | 18.02 | 2000 | 0.6708 | 0.7577 |
| 0.7995 | 27.03 | 3000 | 0.4202 | 0.4770 |
| 0.6978 | 36.04 | 4000 | 0.3054 | 0.3494 |
| 0.6189 | 45.05 | 5000 | 0.2878 | 0.3154 |
| 0.5667 | 54.05 | 6000 | 0.3114 | 0.3286 |
| 0.5173 | 63.06 | 7000 | 0.3085 | 0.3021 |
| 0.4682 | 72.07 | 8000 | 0.3058 | 0.2969 |
| 0.451 | 81.08 | 9000 | 0.3146 | 0.2907 |
| 0.4213 | 90.09 | 10000 | 0.3030 | 0.2881 |
| 0.4005 | 99.1 | 11000 | 0.3001 | 0.2789 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
### Evaluation Script
!python eval.py \
--model_id DrishtiSharma/wav2vec2-large-xls-r-300m-maltese \
--dataset mozilla-foundation/common_voice_8_0 --config mt --split test --log_outputs |
DrishtiSharma/wav2vec2-large-xls-r-300m-myv-v1 | 1eb3e4e3b03f7b6409756681526418d4ee8f11ee | 2022-03-24T11:56:53.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"myv",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-myv-v1 | 1 | null | transformers | 27,938 | ---
language:
- myv
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- myv
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-myv-v1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: myv
metrics:
- name: Test WER
type: wer
value: 0.599548532731377
- name: Test CER
type: cer
value: 0.12953851902597
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: myv
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-myv-v1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - MYV dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8537
- Wer: 0.6160
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-myv-v1 --dataset mozilla-foundation/common_voice_8_0 --config myv --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Erzya language not found in speech-recognition-community-v2/dev_data!
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.000222
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 150
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 19.453 | 1.92 | 50 | 16.4001 | 1.0 |
| 9.6875 | 3.85 | 100 | 5.4468 | 1.0 |
| 4.9988 | 5.77 | 150 | 4.3507 | 1.0 |
| 4.1148 | 7.69 | 200 | 3.6753 | 1.0 |
| 3.4922 | 9.62 | 250 | 3.3103 | 1.0 |
| 3.2443 | 11.54 | 300 | 3.1741 | 1.0 |
| 3.164 | 13.46 | 350 | 3.1346 | 1.0 |
| 3.0954 | 15.38 | 400 | 3.0428 | 1.0 |
| 3.0076 | 17.31 | 450 | 2.9137 | 1.0 |
| 2.6883 | 19.23 | 500 | 2.1476 | 0.9978 |
| 1.5124 | 21.15 | 550 | 0.8955 | 0.8225 |
| 0.8711 | 23.08 | 600 | 0.6948 | 0.7591 |
| 0.6695 | 25.0 | 650 | 0.6683 | 0.7636 |
| 0.5606 | 26.92 | 700 | 0.6821 | 0.7435 |
| 0.503 | 28.85 | 750 | 0.7220 | 0.7516 |
| 0.4528 | 30.77 | 800 | 0.6638 | 0.7324 |
| 0.4219 | 32.69 | 850 | 0.7120 | 0.7435 |
| 0.4109 | 34.62 | 900 | 0.7122 | 0.7511 |
| 0.3887 | 36.54 | 950 | 0.7179 | 0.7199 |
| 0.3895 | 38.46 | 1000 | 0.7322 | 0.7525 |
| 0.391 | 40.38 | 1050 | 0.6850 | 0.7364 |
| 0.3537 | 42.31 | 1100 | 0.7571 | 0.7279 |
| 0.3267 | 44.23 | 1150 | 0.7575 | 0.7257 |
| 0.3195 | 46.15 | 1200 | 0.7580 | 0.6998 |
| 0.2891 | 48.08 | 1250 | 0.7452 | 0.7101 |
| 0.294 | 50.0 | 1300 | 0.7316 | 0.6945 |
| 0.2854 | 51.92 | 1350 | 0.7241 | 0.6757 |
| 0.2801 | 53.85 | 1400 | 0.7532 | 0.6887 |
| 0.2502 | 55.77 | 1450 | 0.7587 | 0.6811 |
| 0.2427 | 57.69 | 1500 | 0.7231 | 0.6851 |
| 0.2311 | 59.62 | 1550 | 0.7288 | 0.6632 |
| 0.2176 | 61.54 | 1600 | 0.7711 | 0.6664 |
| 0.2117 | 63.46 | 1650 | 0.7914 | 0.6940 |
| 0.2114 | 65.38 | 1700 | 0.8065 | 0.6918 |
| 0.1913 | 67.31 | 1750 | 0.8372 | 0.6945 |
| 0.1897 | 69.23 | 1800 | 0.8051 | 0.6869 |
| 0.1865 | 71.15 | 1850 | 0.8076 | 0.6740 |
| 0.1844 | 73.08 | 1900 | 0.7935 | 0.6708 |
| 0.1757 | 75.0 | 1950 | 0.8015 | 0.6610 |
| 0.1636 | 76.92 | 2000 | 0.7614 | 0.6414 |
| 0.1637 | 78.85 | 2050 | 0.8123 | 0.6592 |
| 0.1599 | 80.77 | 2100 | 0.7907 | 0.6566 |
| 0.1498 | 82.69 | 2150 | 0.8641 | 0.6757 |
| 0.1545 | 84.62 | 2200 | 0.7438 | 0.6682 |
| 0.1433 | 86.54 | 2250 | 0.8014 | 0.6624 |
| 0.1427 | 88.46 | 2300 | 0.7758 | 0.6646 |
| 0.1423 | 90.38 | 2350 | 0.7741 | 0.6423 |
| 0.1298 | 92.31 | 2400 | 0.7938 | 0.6414 |
| 0.1111 | 94.23 | 2450 | 0.7976 | 0.6467 |
| 0.1243 | 96.15 | 2500 | 0.7916 | 0.6481 |
| 0.1215 | 98.08 | 2550 | 0.7594 | 0.6392 |
| 0.113 | 100.0 | 2600 | 0.8236 | 0.6392 |
| 0.1077 | 101.92 | 2650 | 0.7959 | 0.6347 |
| 0.0988 | 103.85 | 2700 | 0.8189 | 0.6392 |
| 0.0953 | 105.77 | 2750 | 0.8157 | 0.6414 |
| 0.0889 | 107.69 | 2800 | 0.7946 | 0.6369 |
| 0.0929 | 109.62 | 2850 | 0.8255 | 0.6360 |
| 0.0822 | 111.54 | 2900 | 0.8320 | 0.6334 |
| 0.086 | 113.46 | 2950 | 0.8539 | 0.6490 |
| 0.0825 | 115.38 | 3000 | 0.8438 | 0.6418 |
| 0.0727 | 117.31 | 3050 | 0.8568 | 0.6481 |
| 0.0717 | 119.23 | 3100 | 0.8447 | 0.6512 |
| 0.0815 | 121.15 | 3150 | 0.8470 | 0.6445 |
| 0.0689 | 123.08 | 3200 | 0.8264 | 0.6249 |
| 0.0726 | 125.0 | 3250 | 0.7981 | 0.6169 |
| 0.0648 | 126.92 | 3300 | 0.8237 | 0.6200 |
| 0.0632 | 128.85 | 3350 | 0.8416 | 0.6249 |
| 0.06 | 130.77 | 3400 | 0.8276 | 0.6173 |
| 0.0616 | 132.69 | 3450 | 0.8429 | 0.6209 |
| 0.0614 | 134.62 | 3500 | 0.8485 | 0.6271 |
| 0.0539 | 136.54 | 3550 | 0.8598 | 0.6218 |
| 0.0555 | 138.46 | 3600 | 0.8557 | 0.6169 |
| 0.0604 | 140.38 | 3650 | 0.8436 | 0.6186 |
| 0.0556 | 142.31 | 3700 | 0.8428 | 0.6178 |
| 0.051 | 144.23 | 3750 | 0.8440 | 0.6142 |
| 0.0526 | 146.15 | 3800 | 0.8566 | 0.6142 |
| 0.052 | 148.08 | 3850 | 0.8544 | 0.6178 |
| 0.0519 | 150.0 | 3900 | 0.8537 | 0.6160 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.2
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-or-dx12 | d54d0f6d1f74e256e39ff01a8a2144e774b1b4ad | 2022-03-23T18:33:15.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"or",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"hf-asr-leaderboard",
"model_for_talk",
"mozilla-foundation/common_voice_8_0",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-or-dx12 | 1 | null | transformers | 27,939 | ---
language:
- or
license: apache-2.0
tags:
- automatic-speech-recognition
- generated_from_trainer
- hf-asr-leaderboard
- model_for_talk
- mozilla-foundation/common_voice_8_0
- or
- robust-speech-event
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xls-r-300m-or-dx12
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: or
metrics:
- name: Test WER
type: wer
value: 0.5947242206235012
- name: Test CER
type: cer
value: 0.18272388876724327
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: or
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-or-dx12
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4638
- Wer: 0.5602
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-or-dx12 --dataset mozilla-foundation/common_voice_8_0 --config or --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Oriya language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0004
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 13.5059 | 4.17 | 100 | 10.3789 | 1.0 |
| 4.5964 | 8.33 | 200 | 4.3294 | 1.0 |
| 3.4448 | 12.5 | 300 | 3.7903 | 1.0 |
| 3.3683 | 16.67 | 400 | 3.5289 | 1.0 |
| 2.042 | 20.83 | 500 | 1.1531 | 0.7857 |
| 0.5721 | 25.0 | 600 | 1.0267 | 0.7646 |
| 0.3274 | 29.17 | 700 | 1.0773 | 0.6938 |
| 0.2466 | 33.33 | 800 | 1.0323 | 0.6647 |
| 0.2047 | 37.5 | 900 | 1.1255 | 0.6733 |
| 0.1847 | 41.67 | 1000 | 1.1194 | 0.6515 |
| 0.1453 | 45.83 | 1100 | 1.1215 | 0.6601 |
| 0.1367 | 50.0 | 1200 | 1.1898 | 0.6627 |
| 0.1334 | 54.17 | 1300 | 1.3082 | 0.6687 |
| 0.1041 | 58.33 | 1400 | 1.2514 | 0.6177 |
| 0.1024 | 62.5 | 1500 | 1.2055 | 0.6528 |
| 0.0919 | 66.67 | 1600 | 1.4125 | 0.6369 |
| 0.074 | 70.83 | 1700 | 1.4006 | 0.6634 |
| 0.0681 | 75.0 | 1800 | 1.3943 | 0.6131 |
| 0.0709 | 79.17 | 1900 | 1.3545 | 0.6296 |
| 0.064 | 83.33 | 2000 | 1.2437 | 0.6237 |
| 0.0552 | 87.5 | 2100 | 1.3762 | 0.6190 |
| 0.056 | 91.67 | 2200 | 1.3763 | 0.6323 |
| 0.0514 | 95.83 | 2300 | 1.2897 | 0.6164 |
| 0.0409 | 100.0 | 2400 | 1.4257 | 0.6104 |
| 0.0379 | 104.17 | 2500 | 1.4219 | 0.5853 |
| 0.0367 | 108.33 | 2600 | 1.4361 | 0.6032 |
| 0.0412 | 112.5 | 2700 | 1.4713 | 0.6098 |
| 0.0353 | 116.67 | 2800 | 1.4132 | 0.6369 |
| 0.0336 | 120.83 | 2900 | 1.5210 | 0.6098 |
| 0.0302 | 125.0 | 3000 | 1.4686 | 0.5939 |
| 0.0398 | 129.17 | 3100 | 1.5456 | 0.6204 |
| 0.0291 | 133.33 | 3200 | 1.4111 | 0.5827 |
| 0.0247 | 137.5 | 3300 | 1.3866 | 0.6151 |
| 0.0196 | 141.67 | 3400 | 1.4513 | 0.5880 |
| 0.0218 | 145.83 | 3500 | 1.5100 | 0.5899 |
| 0.0196 | 150.0 | 3600 | 1.4936 | 0.5999 |
| 0.0164 | 154.17 | 3700 | 1.5012 | 0.5701 |
| 0.0168 | 158.33 | 3800 | 1.5601 | 0.5919 |
| 0.0151 | 162.5 | 3900 | 1.4891 | 0.5761 |
| 0.0137 | 166.67 | 4000 | 1.4839 | 0.5800 |
| 0.0143 | 170.83 | 4100 | 1.4826 | 0.5754 |
| 0.0114 | 175.0 | 4200 | 1.4950 | 0.5708 |
| 0.0092 | 179.17 | 4300 | 1.5008 | 0.5694 |
| 0.0104 | 183.33 | 4400 | 1.4774 | 0.5728 |
| 0.0096 | 187.5 | 4500 | 1.4948 | 0.5767 |
| 0.0105 | 191.67 | 4600 | 1.4557 | 0.5694 |
| 0.009 | 195.83 | 4700 | 1.4615 | 0.5628 |
| 0.0081 | 200.0 | 4800 | 1.4638 | 0.5602 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-sat-a3 | 37c5eecf02a124f4c94d84de917483ec5b8a816b | 2022-03-24T11:56:55.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"sat",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-sat-a3 | 1 | null | transformers | 27,940 | ---
language:
- sat
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- sat
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-sat-a3
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: sat
metrics:
- name: Test WER
type: wer
value: 0.357429718875502
- name: Test CER
type: cer
value: 0.14203730272596843
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: sat
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-sat-a3
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - SAT dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8961
- Wer: 0.3976
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sat-a3 --dataset mozilla-foundation/common_voice_8_0 --config sat --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Note: Santali (Ol Chiki) language not found in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0004
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 11.1266 | 33.29 | 100 | 2.8577 | 1.0 |
| 2.1549 | 66.57 | 200 | 1.0799 | 0.5542 |
| 0.5628 | 99.86 | 300 | 0.7973 | 0.4016 |
| 0.0779 | 133.29 | 400 | 0.8424 | 0.4177 |
| 0.0404 | 166.57 | 500 | 0.9048 | 0.4137 |
| 0.0212 | 199.86 | 600 | 0.8961 | 0.3976 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-sat-final | c7f3841f792b773082cc6efab26c5e3115054de2 | 2022-03-24T11:56:58.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"sat",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-sat-final | 1 | null | transformers | 27,941 | ---
language:
- sat
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- sat
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-sat-final
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: sat
metrics:
- name: Test WER
type: wer
value: 0.3493975903614458
- name: Test CER
type: cer
value: 0.13773314203730272
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: sat
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-sat-final
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - SAT dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8012
- Wer: 0.3815
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sat-final --dataset mozilla-foundation/common_voice_8_0 --config sat --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sat-final --dataset speech-recognition-community-v2/dev_data --config sat --split validation --chunk_length_s 10 --stride_length_s 1
**Note: Santali (Ol Chiki) language not found in speech-recognition-community-v2/dev_data**
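The `--chunk_length_s`/`--stride_length_s` flags in the second command correspond to chunked long-audio inference. With the high-level pipeline this looks roughly like the sketch below (the audio path is a hypothetical placeholder):
```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="DrishtiSharma/wav2vec2-large-xls-r-300m-sat-final",
)
# Chunked inference for long recordings, mirroring the eval flags above.
print(asr("sample.wav", chunk_length_s=10, stride_length_s=1))
```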
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0004
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 170
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 10.6317 | 33.29 | 100 | 2.8629 | 1.0 |
| 2.047 | 66.57 | 200 | 0.9516 | 0.5703 |
| 0.4475 | 99.86 | 300 | 0.8539 | 0.3896 |
| 0.0716 | 133.29 | 400 | 0.8277 | 0.3454 |
| 0.047 | 166.57 | 500 | 0.7597 | 0.3655 |
| 0.0249 | 199.86 | 600 | 0.8012 | 0.3815 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v1 | a374e82f687b503e02b49528b291c7bc934325b3 | 2022-03-23T18:35:19.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"sl",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"generated_from_trainer",
"hf-asr-leaderboard",
"model_for_talk",
"mozilla-foundation/common_voice_8_0",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v1 | 1 | null | transformers | 27,942 | ---
language:
- sl
license: apache-2.0
tags:
- automatic-speech-recognition
- generated_from_trainer
- hf-asr-leaderboard
- model_for_talk
- mozilla-foundation/common_voice_8_0
- robust-speech-event
- sl
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-sl-with-LM-v1
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: sl
metrics:
- name: Test WER
type: wer
value: 0.20626555409164105
- name: Test CER
type: cer
value: 0.051648321634392154
- name: Test WER (+LM)
type: wer
value: 0.13482652613087395
- name: Test CER (+LM)
type: cer
value: 0.038838663862562475
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: sl
metrics:
- name: Dev WER
type: wer
value: 0.5406156320830592
- name: Dev CER
type: cer
value: 0.22249723590310583
- name: Dev WER (+LM)
type: wer
value: 0.49783147459727384
- name: Dev CER (+LM)
type: cer
value: 0.1591062599627158
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Test Data
type: speech-recognition-community-v2/eval_data
args: sl
metrics:
- name: Test WER
type: wer
value: 46.17
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-sl-with-LM-v1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - SL dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2756
- Wer: 0.2279
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v1 --dataset mozilla-foundation/common_voice_8_0 --config sl --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v1 --dataset speech-recognition-community-v2/dev_data --config sl --split validation --chunk_length_s 10 --stride_length_s 1
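The "+LM" metric rows above come from beam-search decoding boosted with an n-gram language model. Below is a minimal sketch of LM-aware inference, assuming the repository ships the decoder files that `Wav2Vec2ProcessorWithLM` expects and that `audio` is a 16 kHz mono float array:
```python
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2ProcessorWithLM

model_id = "DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v1"
processor = Wav2Vec2ProcessorWithLM.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# With an LM, decoding consumes the raw logits rather than argmax token ids.
print(processor.batch_decode(logits.numpy()).text)
```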
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7.1e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 100.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 3.3881 | 6.1 | 500 | 2.9710 | 1.0 |
| 2.6401 | 12.2 | 1000 | 1.7677 | 0.9734 |
| 1.5152 | 18.29 | 1500 | 0.5564 | 0.6011 |
| 1.2191 | 24.39 | 2000 | 0.4319 | 0.4390 |
| 1.0237 | 30.49 | 2500 | 0.3141 | 0.3175 |
| 0.8892 | 36.59 | 3000 | 0.2748 | 0.2689 |
| 0.8296 | 42.68 | 3500 | 0.2680 | 0.2534 |
| 0.7602 | 48.78 | 4000 | 0.2820 | 0.2506 |
| 0.7186 | 54.88 | 4500 | 0.2672 | 0.2398 |
| 0.6887 | 60.98 | 5000 | 0.2729 | 0.2402 |
| 0.6507 | 67.07 | 5500 | 0.2767 | 0.2361 |
| 0.6226 | 73.17 | 6000 | 0.2817 | 0.2332 |
| 0.6024 | 79.27 | 6500 | 0.2679 | 0.2279 |
| 0.5787 | 85.37 | 7000 | 0.2837 | 0.2316 |
| 0.5744 | 91.46 | 7500 | 0.2838 | 0.2284 |
| 0.5556 | 97.56 | 8000 | 0.2763 | 0.2281 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v2 | a60641c15fec9390c763597cd26259ad6433bc0b | 2022-03-23T18:35:22.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"sl",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"generated_from_trainer",
"hf-asr-leaderboard",
"model_for_talk",
"mozilla-foundation/common_voice_8_0",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v2 | 1 | null | transformers | 27,943 | ---
language:
- sl
license: apache-2.0
tags:
- automatic-speech-recognition
- generated_from_trainer
- hf-asr-leaderboard
- model_for_talk
- mozilla-foundation/common_voice_8_0
- robust-speech-event
- sl
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-sl-with-LM-v2
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: sl
metrics:
- name: Test WER
type: wer
value: 0.21695212999560826
- name: Test CER
type: cer
value: 0.052850080572474256
- name: Test WER (+LM)
type: wer
value: 0.14551310203484116
- name: Test CER (+LM)
type: cer
value: 0.03927566711277415
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: sl
metrics:
- name: Dev WER
type: wer
value: 0.560722380639029
- name: Dev CER
type: cer
value: 0.2279626093074681
- name: Dev WER (+LM)
type: wer
value: 0.46486802661402354
- name: Dev CER (+LM)
type: cer
value: 0.21105136194592422
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Test Data
type: speech-recognition-community-v2/eval_data
args: sl
metrics:
- name: Test WER
type: wer
value: 46.69
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-sl-with-LM-v2
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - SL dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2855
- Wer: 0.2401
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v2 --dataset mozilla-foundation/common_voice_8_0 --config sl --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sl-with-LM-v2 --dataset speech-recognition-community-v2/dev_data --config sl --split validation --chunk_length_s 10 --stride_length_s 1
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 100.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 6.9294 | 6.1 | 500 | 2.9712 | 1.0 |
| 2.8305 | 12.2 | 1000 | 1.7073 | 0.9479 |
| 1.4795 | 18.29 | 1500 | 0.5756 | 0.6397 |
| 1.3433 | 24.39 | 2000 | 0.4968 | 0.5424 |
| 1.1766 | 30.49 | 2500 | 0.4185 | 0.4743 |
| 1.0017 | 36.59 | 3000 | 0.3303 | 0.3578 |
| 0.9358 | 42.68 | 3500 | 0.3003 | 0.3051 |
| 0.8358 | 48.78 | 4000 | 0.3045 | 0.2884 |
| 0.7647 | 54.88 | 4500 | 0.2866 | 0.2677 |
| 0.7482 | 60.98 | 5000 | 0.2829 | 0.2585 |
| 0.6943 | 67.07 | 5500 | 0.2782 | 0.2478 |
| 0.6586 | 73.17 | 6000 | 0.2911 | 0.2537 |
| 0.6425 | 79.27 | 6500 | 0.2817 | 0.2462 |
| 0.6067 | 85.37 | 7000 | 0.2910 | 0.2436 |
| 0.5974 | 91.46 | 7500 | 0.2875 | 0.2430 |
| 0.5812 | 97.56 | 8000 | 0.2852 | 0.2396 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v20 | 65fe3279f05bdef2755eb9993b8b1135895b8e78 | 2022-03-24T11:54:50.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"sr",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"model_for_talk",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v20 | 1 | null | transformers | 27,944 | ---
language:
- sr
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
- sr
- robust-speech-event
- model_for_talk
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-sr-v20
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: sr
metrics:
- name: Test WER
type: wer
value: 0.3313112459169389
- name: Test CER
type: cer
value: 0.11472902097902098
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: sl
metrics:
- name: Test WER
type: wer
value: 0.953810623556582
- name: Test CER
type: cer
value: 0.8068880824888259
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: sr
metrics:
- name: Test WER
type: wer
value: 95.38
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Test Data
type: speech-recognition-community-v2/eval_data
args: sr
metrics:
- name: Test WER
type: wer
value: 95.14
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-sr-v20
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - SR dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6695
- Wer: 0.3355
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v20 --dataset mozilla-foundation/common_voice_8_0 --config sr --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v20 --dataset speech-recognition-community-v2/dev_data --config sr --split validation --chunk_length_s 10 --stride_length_s 1
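For plain transcription without the evaluation harness, greedy CTC decoding looks roughly like this (a minimal sketch; `audio` is assumed to be a 16 kHz mono float array):
```python
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v20"
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

inputs = processor(audio, sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

pred_ids = torch.argmax(logits, dim=-1)  # greedy CTC decoding
print(processor.batch_decode(pred_ids))
```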
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 800
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 8.2198 | 7.5 | 300 | 2.9960 | 1.0 |
| 2.0533 | 15.0 | 600 | 0.6508 | 0.6314 |
| 0.4176 | 22.5 | 900 | 0.5726 | 0.5170 |
| 0.2327 | 30.0 | 1200 | 0.5771 | 0.5296 |
| 0.1723 | 37.5 | 1500 | 0.5508 | 0.4377 |
| 0.1226 | 45.0 | 1800 | 0.6567 | 0.4363 |
| 0.1101 | 52.5 | 2100 | 0.5819 | 0.4452 |
| 0.0934 | 60.0 | 2400 | 0.6449 | 0.4354 |
| 0.0752 | 67.5 | 2700 | 0.5584 | 0.4162 |
| 0.0645 | 75.0 | 3000 | 0.6289 | 0.4162 |
| 0.0539 | 82.5 | 3300 | 0.6153 | 0.4232 |
| 0.0482 | 90.0 | 3600 | 0.6772 | 0.4811 |
| 0.0441 | 97.5 | 3900 | 0.6156 | 0.4582 |
| 0.0403 | 105.0 | 4200 | 0.6077 | 0.3971 |
| 0.0371 | 112.5 | 4500 | 0.7354 | 0.4148 |
| 0.0279 | 120.0 | 4800 | 0.6316 | 0.3598 |
| 0.0198 | 127.5 | 5100 | 0.6615 | 0.3626 |
| 0.0185 | 135.0 | 5400 | 0.6914 | 0.3658 |
| 0.0183 | 142.5 | 5700 | 0.7087 | 0.3742 |
| 0.0154 | 150.0 | 6000 | 0.6930 | 0.3542 |
| 0.0143 | 157.5 | 6300 | 0.6787 | 0.3383 |
| 0.0118 | 165.0 | 6600 | 0.6347 | 0.3476 |
| 0.0101 | 172.5 | 6900 | 0.6235 | 0.3434 |
| 0.0103 | 180.0 | 7200 | 0.6078 | 0.3434 |
| 0.0063 | 187.5 | 7500 | 0.6740 | 0.3411 |
| 0.0057 | 195.0 | 7800 | 0.6695 | 0.3355 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v4 | 1ca06a64973861a02dcb7a6671ebfc1c56a105ba | 2022-03-23T18:35:24.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"sr",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"generated_from_trainer",
"hf-asr-leaderboard",
"model_for_talk",
"mozilla-foundation/common_voice_8_0",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v4 | 1 | null | transformers | 27,945 | ---
language:
- sr
license: apache-2.0
tags:
- automatic-speech-recognition
- generated_from_trainer
- hf-asr-leaderboard
- model_for_talk
- mozilla-foundation/common_voice_8_0
- robust-speech-event
- sr
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-sr-v4
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: sr
metrics:
- name: Test WER
type: wer
value: 0.303313
- name: Test CER
type: cer
value: 0.1048951
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: sr
metrics:
- name: Test WER
type: wer
value: 0.9486784706184245
- name: Test CER
type: cer
value: 0.8084369606584945
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Test Data
type: speech-recognition-community-v2/eval_data
args: sr
metrics:
- name: Test WER
type: wer
value: 94.53
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-sr-v4
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - SR dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5570
- Wer: 0.3038
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v4 --dataset mozilla-foundation/common_voice_8_0 --config sr --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-sr-v4 --dataset speech-recognition-community-v2/dev_data --config sr --split validation --chunk_length_s 10 --stride_length_s 1
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 800
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 8.2934 | 7.5 | 300 | 2.9777 | 0.9995 |
| 1.5049 | 15.0 | 600 | 0.5036 | 0.4806 |
| 0.3263 | 22.5 | 900 | 0.5822 | 0.4055 |
| 0.2008 | 30.0 | 1200 | 0.5609 | 0.4032 |
| 0.1543 | 37.5 | 1500 | 0.5203 | 0.3710 |
| 0.1158 | 45.0 | 1800 | 0.6458 | 0.3985 |
| 0.0997 | 52.5 | 2100 | 0.6227 | 0.4013 |
| 0.0834 | 60.0 | 2400 | 0.6048 | 0.3836 |
| 0.0665 | 67.5 | 2700 | 0.6197 | 0.3686 |
| 0.0602 | 75.0 | 3000 | 0.5418 | 0.3453 |
| 0.0524 | 82.5 | 3300 | 0.5310 | 0.3486 |
| 0.0445 | 90.0 | 3600 | 0.5599 | 0.3374 |
| 0.0406 | 97.5 | 3900 | 0.5958 | 0.3327 |
| 0.0358 | 105.0 | 4200 | 0.6017 | 0.3262 |
| 0.0302 | 112.5 | 4500 | 0.5613 | 0.3248 |
| 0.0285 | 120.0 | 4800 | 0.5659 | 0.3462 |
| 0.0213 | 127.5 | 5100 | 0.5568 | 0.3206 |
| 0.0215 | 135.0 | 5400 | 0.6524 | 0.3472 |
| 0.0162 | 142.5 | 5700 | 0.6223 | 0.3458 |
| 0.0137 | 150.0 | 6000 | 0.6625 | 0.3313 |
| 0.0114 | 157.5 | 6300 | 0.5739 | 0.3336 |
| 0.0101 | 165.0 | 6600 | 0.5906 | 0.3285 |
| 0.008 | 172.5 | 6900 | 0.5982 | 0.3112 |
| 0.0076 | 180.0 | 7200 | 0.5399 | 0.3094 |
| 0.0071 | 187.5 | 7500 | 0.5387 | 0.2991 |
| 0.0057 | 195.0 | 7800 | 0.5570 | 0.3038 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.2
- Tokenizers 0.11.0
|
DrishtiSharma/wav2vec2-large-xls-r-300m-vot-final-a2 | 40ea91a10a7c3ed8c40cef54dcabe4b5473126e4 | 2022-03-24T11:57:00.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"vot",
"dataset:mozilla-foundation/common_voice_8_0",
"transformers",
"mozilla-foundation/common_voice_8_0",
"robust-speech-event",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | DrishtiSharma | null | DrishtiSharma/wav2vec2-large-xls-r-300m-vot-final-a2 | 1 | null | transformers | 27,946 | ---
language:
- vot
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- vot
- robust-speech-event
- hf-asr-leaderboard
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: wav2vec2-large-xls-r-300m-vot-final-a2
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: vot
metrics:
- name: Test WER
type: wer
value: 0.8333333333333334
- name: Test CER
type: cer
value: 0.48672566371681414
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Robust Speech Event - Dev Data
type: speech-recognition-community-v2/dev_data
args: vot
metrics:
- name: Test WER
type: wer
value: NA
- name: Test CER
type: cer
value: NA
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-vot-final-a2
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - VOT dataset.
It achieves the following results on the evaluation set:
- Loss: 2.8745
- Wer: 0.8333
### Evaluation Commands
1. To evaluate on mozilla-foundation/common_voice_8_0 with test split
python eval.py --model_id DrishtiSharma/wav2vec2-large-xls-r-300m-vot-final-a2 --dataset mozilla-foundation/common_voice_8_0 --config vot --split test --log_outputs
2. To evaluate on speech-recognition-community-v2/dev_data
Votic language isn't available in speech-recognition-community-v2/dev_data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0004
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 340
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 11.1216 | 33.33 | 100 | 4.2848 | 1.0 |
| 2.9982 | 66.67 | 200 | 2.8665 | 1.0 |
| 1.5476 | 100.0 | 300 | 2.3022 | 0.8889 |
| 0.2776 | 133.33 | 400 | 2.7480 | 0.8889 |
| 0.1136 | 166.67 | 500 | 2.5383 | 0.8889 |
| 0.0489 | 200.0 | 600 | 2.8745 | 0.8333 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
DueLinx0402/DialoGPT-small-harrypotter | 822dcfa71868992db9a939f10d9e1c74e0d91d9d | 2021-09-14T13:31:42.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | DueLinx0402 | null | DueLinx0402/DialoGPT-small-harrypotter | 1 | null | transformers | 27,947 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
Dumiiii/wav2vec2-xls-r-300m-romanian | a8857e4671dc5e1eab487a6c2caa9cf63b2a2d8a | 2022-01-17T13:34:59.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | Dumiiii | null | Dumiiii/wav2vec2-xls-r-300m-romanian | 1 | null | transformers | 27,948 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
name: wav2vec2-xls-r-300m-romanian
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
## This model achieves a WER of 12.457178% on the Common Voice ro test split
# wav2vec2-xls-r-300m-romanian
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the Common Voice ro and RSS datasets.
It achieves the following results on the evaluation set:
- eval_loss: 0.0836
- eval_wer: 0.0705
- eval_runtime: 160.4549
- eval_samples_per_second: 11.081
- eval_steps_per_second: 1.39
- epoch: 14.38
- step: 2703
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 50
- num_epochs: 15
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.13.3
- Tokenizers 0.10.3
The following code was used for evaluation:
```
import re
import string

import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "ro", split="test")
wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("Dumiiii/wav2vec2-xls-r-300m-romanian")
model = Wav2Vec2ForCTC.from_pretrained("Dumiiii/wav2vec2-xls-r-300m-romanian")
model.to("cuda")

# Escape the punctuation so characters like ']' and '-' are safe inside the character class.
chars_to_ignore_regex = '[' + re.escape(string.punctuation) + ']'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing: strip punctuation, lowercase, and read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Batched greedy-decoding inference on GPU.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
Credits for evaluation: https://huggingface.co/anton-l |
Eagle3ye/DialoGPT-small-PeppaPig | 9d5c9a5d1a24e96828b9e76372fce5e7c443648f | 2021-08-27T15:03:14.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Eagle3ye | null | Eagle3ye/DialoGPT-small-PeppaPig | 1 | null | transformers | 27,949 | ---
tags:
- conversational
---
# Peppa Pig DialoGPT Model |
Ebtihal/AraBertMo_base_V1 | b2d6d27bb62d83e27190a814df0a0caab0a69552 | 2022-03-15T19:14:23.000Z | [
"pytorch",
"bert",
"fill-mask",
"ar",
"dataset:OSCAR",
"transformers",
"Fill-Mask",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/AraBertMo_base_V1 | 1 | null | transformers | 27,950 | ---
language: ar
tags: Fill-Mask
datasets: OSCAR
widget:
- text: " السلام عليكم ورحمة[MASK] وبركاتة"
- text: " اهلا وسهلا بكم في [MASK] من سيربح المليون"
- text: " مرحبا بك عزيزي الزائر [MASK] موقعنا "
---
# Arabic BERT Model
**AraBERTMo** is an Arabic pre-trained language model based on [Google's BERT architecture](https://github.com/google-research/bert).
AraBERTMo_base uses the same BERT-Base config.
AraBERTMo_base now comes in 10 new variants.
All models are available on the `HuggingFace` model page under the [Ebtihal](https://huggingface.co/Ebtihal/) name.
Checkpoints are available in PyTorch formats.
## Pretraining Corpus
The `AraBertMo_base_V1` model was pre-trained on ~3 million words:
- [OSCAR](https://traces1.inria.fr/oscar/) - Arabic version "unshuffled_deduplicated_ar".
## Training results
This model achieves the following results:
| Task | Num examples | Num Epochs | Batch Size | steps | Wall time | training loss|
|:----:|:----:|:----:|:----:|:-----:|:----:|:-----:|
| Fill-Mask| 10010| 1 | 64 | 157 | 2m 2s | 9.0183 |
## Load Pretrained Model
You can use this model by installing `torch` or `tensorflow` and the Hugging Face `transformers` library, then initializing it directly like this:
```python
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("Ebtihal/AraBertMo_base_V1")
model = AutoModelForMaskedLM.from_pretrained("Ebtihal/AraBertMo_base_V1")
```
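For a quick sanity check, the checkpoint can also be wrapped in a fill-mask pipeline (a minimal sketch; the example sentence is taken from the widget examples above):
```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="Ebtihal/AraBertMo_base_V1")
# Arabic greeting from the widget above; the model fills in the [MASK] token.
print(fill_mask("السلام عليكم ورحمة[MASK] وبركاتة"))
```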
## This model was built for master's degree research at:
- [University of kufa](https://uokufa.edu.iq/).
- [Faculty of Computer Science and Mathematics](https://mathcomp.uokufa.edu.iq/).
- **Department of Computer Science**
|
Ebtihal/AraBertMo_base_V2 | 0654a3cfaa744225e51134c23f298125112cd1cb | 2022-03-15T19:14:01.000Z | [
"pytorch",
"bert",
"fill-mask",
"ar",
"dataset:OSCAR",
"transformers",
"Fill-Mask",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/AraBertMo_base_V2 | 1 | null | transformers | 27,951 | ---
language: ar
tags: Fill-Mask
datasets: OSCAR
widget:
- text: " السلام عليكم ورحمة[MASK] وبركاتة"
- text: " اهلا وسهلا بكم في [MASK] من سيربح المليون"
- text: " مرحبا بك عزيزي الزائر [MASK] موقعنا "
---
# Arabic BERT Model
**AraBERTMo** is an Arabic pre-trained language model based on [Google's BERT architecture](https://github.com/google-research/bert).
AraBERTMo_base uses the same BERT-Base config.
AraBERTMo_base now comes in 10 new variants.
All models are available on the `HuggingFace` model page under the [Ebtihal](https://huggingface.co/Ebtihal/) name.
Checkpoints are available in PyTorch formats.
## Pretraining Corpus
The `AraBertMo_base_V2` model was pre-trained on ~3 million words:
- [OSCAR](https://traces1.inria.fr/oscar/) - Arabic version "unshuffled_deduplicated_ar".
## Training results
This model achieves the following results:
| Task | Num examples | Num Epochs | Batch Size | steps | Wall time | training loss|
|:----:|:----:|:----:|:----:|:-----:|:----:|:-----:|
| Fill-Mask| 20020| 2 | 64 | 626 | 19m 2s | 8.437 |
## Load Pretrained Model
You can use this model by installing `torch` or `tensorflow` and the Hugging Face `transformers` library, then initializing it directly like this:
```python
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("Ebtihal/AraBertMo_base_V2")
model = AutoModelForMaskedLM.from_pretrained("Ebtihal/AraBertMo_base_V2")
```
## This model was built for master's degree research at:
- [University of kufa](https://uokufa.edu.iq/).
- [Faculty of Computer Science and Mathematics](https://mathcomp.uokufa.edu.iq/).
- **Department of Computer Science**
|
Ebtihal/AraBertMo_base_V5 | b2a00bce8f0934f717cd4de2290041390f0a9c96 | 2022-03-15T19:12:59.000Z | [
"pytorch",
"bert",
"fill-mask",
"ar",
"dataset:OSCAR",
"transformers",
"Fill-Mask",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/AraBertMo_base_V5 | 1 | null | transformers | 27,952 | ---
language: ar
tags: Fill-Mask
datasets: OSCAR
widget:
- text: " السلام عليكم ورحمة[MASK] وبركاتة"
- text: " اهلا وسهلا بكم في [MASK] من سيربح المليون"
- text: " مرحبا بك عزيزي الزائر [MASK] موقعنا "
---
# Arabic BERT Model
**AraBERTMo** is an Arabic pre-trained language model based on [Google's BERT architecture](https://github.com/google-research/bert).
AraBERTMo_base uses the same BERT-Base config.
AraBERTMo_base now comes in 10 new variants.
All models are available on the `HuggingFace` model page under the [Ebtihal](https://huggingface.co/Ebtihal/) name.
Checkpoints are available in PyTorch formats.
## Pretraining Corpus
The `AraBertMo_base_V5` model was pre-trained on ~3 million words:
- [OSCAR](https://traces1.inria.fr/oscar/) - Arabic version "unshuffled_deduplicated_ar".
## Training results
This model achieves the following results:
| Task | Num examples | Num Epochs | Batch Size | steps | Wall time | training loss|
|:----:|:----:|:----:|:----:|:-----:|:----:|:-----:|
| Fill-Mask| 50046| 5 | 64 | 3910 | 6h 49m 59s | 7.4599 |
## Load Pretrained Model
You can use this model by installing `torch` or `tensorflow` and the Hugging Face `transformers` library, then initializing it directly like this:
```python
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("Ebtihal/AraBertMo_base_V5")
model = AutoModelForMaskedLM.from_pretrained("Ebtihal/AraBertMo_base_V5")
```
## This model was built for master's degree research at:
- [University of kufa](https://uokufa.edu.iq/).
- [Faculty of Computer Science and Mathematics](https://mathcomp.uokufa.edu.iq/).
- **Department of Computer Science**
|
Ebtihal/AraDiaBERT | 14650766a8cf1ecc31d75f0aea1167c0d75daf86 | 2021-07-26T14:38:29.000Z | [
"pytorch",
"bert",
"text-generation",
"transformers"
] | text-generation | false | Ebtihal | null | Ebtihal/AraDiaBERT | 1 | null | transformers | 27,953 | Entry not found |
Ebtihal/AraDiaBERTo | d42d0189866f4ec6a1ec83e7b8ca965c8e1ae678 | 2021-09-16T16:45:45.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/AraDiaBERTo | 1 | null | transformers | 27,954 | Entry not found |
Ebtihal/AraDiaBERTo_V2 | 4329e3cfbdcb22415f451ca7c9ea8fa17ed9ab06 | 2021-09-28T15:45:41.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/AraDiaBERTo_V2 | 1 | null | transformers | 27,955 | Entry not found |
Ebtihal/EsperBERTo | 65b6c1eb70c5406fb5c6e45a423d5c26d9804017 | 2021-07-06T19:17:37.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/EsperBERTo | 1 | null | transformers | 27,956 | Entry not found |
Ebtihal/bert-ar | 1def53c6b8cb0bf614073fa20794cf56be8e5b35 | 2021-09-30T22:05:15.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/bert-ar | 1 | null | transformers | 27,957 | Entry not found |
Ebtihal/bert-en | 88b3cb950026a4b308aa3ba1a616e47c17f36b72 | 2021-11-02T19:35:55.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Ebtihal | null | Ebtihal/bert-en | 1 | null | transformers | 27,958 | Entry not found |
Edaiplay/edaiplay-t5model | 4a82139a9d8a2aaeb8e5c5be905ec25047f52a34 | 2021-09-16T14:36:45.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Edaiplay | null | Edaiplay/edaiplay-t5model | 1 | 1 | transformers | 27,959 | Entry not found |
Edomonndo/opus-mt-en-ro-finetuned-en-to-ro | a56c9164886ed5ccb8fc07e8f26a9cc58f70c4ed | 2021-07-27T05:34:02.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"dataset:wmt16",
"transformers",
"generated_from_trainer",
"autotrain_compatible"
] | text2text-generation | false | Edomonndo | null | Edomonndo/opus-mt-en-ro-finetuned-en-to-ro | 1 | null | transformers | 27,960 | ---
tags:
- generated_from_trainer
datasets:
- wmt16
metrics:
- bleu
model_index:
- name: opus-mt-en-ro-finetuned-en-to-ro
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: wmt16
type: wmt16
args: ro-en
metric:
name: Bleu
type: bleu
value: 28.1641
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# opus-mt-en-ro-finetuned-en-to-ro
This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-ro](https://huggingface.co/Helsinki-NLP/opus-mt-en-ro) on the wmt16 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2886
- Bleu: 28.1641
- Gen Len: 34.1071
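A minimal usage sketch for English-to-Romanian translation with this checkpoint, assuming the standard Marian seq2seq API (the input sentence is a hypothetical example):
```python
from transformers import MarianMTModel, MarianTokenizer

model_id = "Edomonndo/opus-mt-en-ro-finetuned-en-to-ro"
tokenizer = MarianTokenizer.from_pretrained(model_id)
model = MarianMTModel.from_pretrained(model_id)

batch = tokenizer(["The weather is nice today."], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```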
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|
| 0.7436 | 1.0 | 38145 | 1.2886 | 28.1641 | 34.1071 |
### Framework versions
- Transformers 4.9.1
- Pytorch 1.9.0+cu102
- Datasets 1.10.2
- Tokenizers 0.10.3
|
Edomonndo/opus-mt-ja-en-finetuned-ja-to-en_xml | 00baf107e2e3119e93644934df231d6184de4d37 | 2021-12-04T10:23:03.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible"
] | text2text-generation | false | Edomonndo | null | Edomonndo/opus-mt-ja-en-finetuned-ja-to-en_xml | 1 | null | transformers | 27,961 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- bleu
model_index:
- name: opus-mt-ja-en-finetuned-ja-to-en_xml
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
metric:
name: Bleu
type: bleu
value: 73.8646
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# opus-mt-ja-en-finetuned-ja-to-en_xml
This model is a fine-tuned version of [Helsinki-NLP/opus-mt-ja-en](https://huggingface.co/Helsinki-NLP/opus-mt-ja-en) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7520
- Bleu: 73.8646
- Gen Len: 27.0884
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| 1.0512 | 1.0 | 748 | 0.8333 | 59.8234 | 27.905 |
| 0.6076 | 2.0 | 1496 | 0.7817 | 62.5606 | 26.1834 |
| 0.4174 | 3.0 | 2244 | 0.7817 | 64.8346 | 28.2918 |
| 0.2971 | 4.0 | 2992 | 0.7653 | 67.6013 | 27.2222 |
| 0.2172 | 5.0 | 3740 | 0.7295 | 69.4017 | 27.0174 |
| 0.1447 | 6.0 | 4488 | 0.7522 | 68.8355 | 28.2865 |
| 0.0953 | 7.0 | 5236 | 0.7596 | 71.4743 | 27.1861 |
| 0.0577 | 8.0 | 5984 | 0.7469 | 72.0684 | 26.921 |
| 0.04 | 9.0 | 6732 | 0.7526 | 73.2821 | 27.1365 |
| 0.0213 | 10.0 | 7480 | 0.7520 | 73.8646 | 27.0884 |
### Framework versions
- Transformers 4.9.1
- Pytorch 1.10.0+cu111
- Datasets 1.10.2
- Tokenizers 0.10.3
|
EhsanAghazadeh/electra-base-random-weights | 3309752aa61ef2b6d0a4f8f50d6a6b28219dee92 | 2021-09-04T20:29:50.000Z | [
"pytorch",
"electra",
"feature-extraction",
"transformers"
] | feature-extraction | false | EhsanAghazadeh | null | EhsanAghazadeh/electra-base-random-weights | 1 | null | transformers | 27,962 | Entry not found |
EhsanAghazadeh/roberta-base-random-weights | 8b5969b19f4c031cf55b8dc99ed6fe10595ef7f3 | 2021-09-04T20:27:13.000Z | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | EhsanAghazadeh | null | EhsanAghazadeh/roberta-base-random-weights | 1 | null | transformers | 27,963 | Entry not found |
Elbe/RoBERTaforIns | fcdbdbf8e21c4f52fa2c0645bb4e323cbeb0601b | 2021-05-20T11:47:50.000Z | [
"pytorch",
"jax",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Elbe | null | Elbe/RoBERTaforIns | 1 | null | transformers | 27,964 | Entry not found |
Elzen7/DialoGPT-medium-harrypotter | 840c6f973ad0398b2c6308150baca7a4036923ce | 2021-10-19T07:54:41.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Elzen7 | null | Elzen7/DialoGPT-medium-harrypotter | 1 | null | transformers | 27,965 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
Emanuel/roebrta-base-val-test | ebaec6c4ae89212cd7c1e5c449813f9182f1943a | 2022-01-23T15:12:04.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] | fill-mask | false | Emanuel | null | Emanuel/roebrta-base-val-test | 1 | null | transformers | 27,966 | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: language-modeling
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# language-modeling
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4229
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- distributed_type: tpu
- num_devices: 8
- total_train_batch_size: 64
- total_eval_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
### Framework versions
- Transformers 4.16.0.dev0
- Pytorch 1.8.1+cu102
- Datasets 1.13.3
- Tokenizers 0.10.3
|
Eunooeh/mnmt_gpt2 | 5a343a5698e45baa08e0035785012e00e7329cdf | 2021-12-13T02:53:13.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Eunooeh | null | Eunooeh/mnmt_gpt2 | 1 | null | transformers | 27,967 | Entry not found |
ExEngineer/DialoGPT-medium-jdt | ad9147bf49b03f675b760c79ec1b32202e6f0784 | 2022-01-13T17:40:04.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | ExEngineer | null | ExEngineer/DialoGPT-medium-jdt | 1 | null | transformers | 27,968 | ---
tags:
- conversational
---
# JDT chat bot
Eyvaz/wav2vec2-base-russian-demo-kaggle | 53e29aa33d4adb96c21ddba943b2ab664c890a7f | 2021-12-04T11:00:23.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | Eyvaz | null | Eyvaz/wav2vec2-base-russian-demo-kaggle | 1 | 1 | transformers | 27,969 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-russian-demo-kaggle
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-russian-demo-kaggle
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: inf
- Wer: 0.9997
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 12
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 24
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 0.0102 | 1.03 | 500 | inf | 0.9997 |
| 0.0068 | 2.06 | 1000 | inf | 0.9997 |
| 0.0 | 3.09 | 1500 | inf | 0.9997 |
| 0.0313 | 4.12 | 2000 | inf | 0.9997 |
| 0.0 | 5.15 | 2500 | inf | 0.9997 |
| 0.0052 | 6.19 | 3000 | inf | 0.9997 |
| 0.0287 | 7.22 | 3500 | inf | 0.9997 |
| 0.0 | 8.25 | 4000 | inf | 0.9997 |
| 0.01 | 9.28 | 4500 | inf | 0.9997 |
| 0.0 | 10.31 | 5000 | inf | 0.9997 |
| 0.3919 | 11.34 | 5500 | inf | 0.9997 |
| 0.0 | 12.37 | 6000 | inf | 0.9997 |
| 0.0 | 13.4 | 6500 | inf | 0.9997 |
| 0.0 | 14.43 | 7000 | inf | 0.9997 |
| 0.6422 | 15.46 | 7500 | inf | 0.9997 |
| 0.0 | 16.49 | 8000 | inf | 0.9997 |
| 0.0 | 17.53 | 8500 | inf | 0.9997 |
| 0.0 | 18.56 | 9000 | inf | 0.9997 |
| 0.0 | 19.59 | 9500 | inf | 0.9997 |
| 0.0 | 20.62 | 10000 | inf | 0.9997 |
| 0.0427 | 21.65 | 10500 | inf | 0.9997 |
| 0.0 | 22.68 | 11000 | inf | 0.9997 |
| 0.0 | 23.71 | 11500 | inf | 0.9997 |
| 0.0 | 24.74 | 12000 | inf | 0.9997 |
| 0.0091 | 25.77 | 12500 | inf | 0.9997 |
| 0.1243 | 26.8 | 13000 | inf | 0.9997 |
| 0.0 | 27.83 | 13500 | inf | 0.9997 |
| 0.0 | 28.87 | 14000 | inf | 0.9997 |
| 0.0 | 29.9 | 14500 | inf | 0.9997 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.9.1
- Datasets 1.13.3
- Tokenizers 0.10.3
|
Eyvaz/wav2vec2-base-russian-modified-kaggle | ef5ef6096c788bbf43851072fc38af0f57b37018 | 2021-12-17T18:39:50.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | Eyvaz | null | Eyvaz/wav2vec2-base-russian-modified-kaggle | 1 | 1 | transformers | 27,970 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-russian-modified-kaggle
  results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-russian-modified-kaggle
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 12
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 24
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.11.3
- Pytorch 1.9.1
- Datasets 1.13.3
- Tokenizers 0.10.3
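For a quick smoke test, the checkpoint can be loaded with the ASR pipeline. This is a minimal sketch only; the audio path is a placeholder, and transcription quality is not documented for this run.
```python
from transformers import pipeline

# Load the fine-tuned checkpoint for Russian speech recognition.
asr = pipeline("automatic-speech-recognition", model="Eyvaz/wav2vec2-base-russian-modified-kaggle")
print(asr("sample.wav")["text"])  # "sample.wav" is a placeholder file
```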
|
FAN-L/HM_model001 | b73f62cd4cc156f8be3cb699f2c4b35a0344ada8 | 2021-11-02T04:23:58.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | FAN-L | null | FAN-L/HM_model001 | 1 | null | transformers | 27,971 | Entry not found |
FabianGroeger/HotelBERT-small | 367aa3d306b5ddff5e119e1b88fe8e378f178de3 | 2021-11-18T05:39:47.000Z | [
"pytorch",
"tf",
"roberta",
"fill-mask",
"de",
"transformers",
"autotrain_compatible"
] | fill-mask | false | FabianGroeger | null | FabianGroeger/HotelBERT-small | 1 | null | transformers | 27,972 | ---
language: de
widget:
- text: "Das <mask> hat sich toll um uns gekümmert."
---
# HotelBERT-small
This model was trained on reviews from a well-known German hotel platform.
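The widget sentence above can be reproduced with the fill-mask pipeline; a minimal sketch:
```python
from transformers import pipeline

# Predict the masked token in the widget example sentence.
fill_mask = pipeline("fill-mask", model="FabianGroeger/HotelBERT-small")
for prediction in fill_mask("Das <mask> hat sich toll um uns gekümmert."):
    print(prediction["token_str"], round(prediction["score"], 3))
```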
|
FarisHijazi/wav2vec2-large-xls-r-300m-arabic-colab | 9de71c98aa627c5a54c6b1dcc166988da91f533b | 2021-12-19T02:47:17.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | FarisHijazi | null | FarisHijazi/wav2vec2-large-xls-r-300m-arabic-colab | 1 | null | transformers | 27,973 | Entry not found |
Fidlobabovic/beta-kvantorium-simple-small | 662e1bb4d63069b4d2cb9a953611d8fa54ecd8e7 | 2021-05-20T11:50:06.000Z | [
"pytorch",
"jax",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Fidlobabovic | null | Fidlobabovic/beta-kvantorium-simple-small | 1 | null | transformers | 27,974 | Beta-kavntorium-simple-small is a transformers model RoBerta pretrained on a large corpus of Russion kvantorim data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with objective:
Automate communication with the Quantorium community and mentors. |
FirmanBr/chibibot | 5995b8e752438fa06c00340bcece414f3f81900b | 2021-05-18T18:39:06.000Z | [
"pytorch",
"jax",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | FirmanBr | null | FirmanBr/chibibot | 1 | null | transformers | 27,975 | Entry not found |
FitoDS/wav2vec2-large-xls-r-300m-guarani-colab | 25203e060e87b0c9eff8ae153bb4d1a1dbceb0aa | 2022-01-28T16:22:06.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | FitoDS | null | FitoDS/wav2vec2-large-xls-r-300m-guarani-colab | 1 | null | transformers | 27,976 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-large-xls-r-300m-guarani-colab
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-guarani-colab
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 3.2392
- Wer: 1.0743
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 100
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 18.2131 | 49.94 | 400 | 3.2901 | 1.0 |
| 2.0496 | 99.94 | 800 | 3.2392 | 1.0743 |
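A WER above 1.0 (here 1.0743) is possible because insertions count as errors: when substitutions, deletions, and insertions together exceed the number of reference words, the ratio passes 1. A quick check with the `jiwer` package (the strings are illustrative, not taken from the evaluation set):
```python
from jiwer import wer

reference = "mba'éichapa nde"            # illustrative two-word reference
hypothesis = "mba e icha pa nde reiko"   # hypothesis with several insertions
print(wer(reference, hypothesis))        # > 1.0: errors outnumber reference words
```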
### Framework versions
- Transformers 4.16.0.dev0
- Pytorch 1.10.1+cu102
- Datasets 1.17.1.dev0
- Tokenizers 0.11.0
|
Flampt/DialoGPT-medium-Sheldon | 424d62c7b7f1cc602c38aa1a0303cc5ee08e3137 | 2021-08-28T14:17:44.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Flampt | null | Flampt/DialoGPT-medium-Sheldon | 1 | null | transformers | 27,977 | ---
tags:
- conversational
---
# Sheldon Cooper from The Big Bang Theory DialoGPT Model
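A standard DialoGPT chat loop works with this checkpoint; a sketch (the number of turns and generation settings are illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained("Flampt/DialoGPT-medium-Sheldon")
model = AutoModelForCausalLM.from_pretrained("Flampt/DialoGPT-medium-Sheldon")

chat_history_ids = None
for step in range(3):
    # Encode the user turn and append the end-of-string token.
    new_ids = tokenizer.encode(input(">> You: ") + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = torch.cat([chat_history_ids, new_ids], dim=-1) if step > 0 else new_ids
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    print("Sheldon:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```
|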
Francesco/dummy | 31afef9d94e18b3c2c3ba1e3f586e71750c56b93 | 2021-04-09T14:53:50.000Z | [
"pytorch",
"transformers"
] | null | false | Francesco | null | Francesco/dummy | 1 | null | transformers | 27,978 | Entry not found |
Francesco/resnet152-224-1k | 9e19bd89fe359caaa18146108dc193c8fb894d7a | 2022-02-23T11:53:02.000Z | [
"pytorch",
"resnet",
"image-classification",
"transformers"
] | image-classification | false | Francesco | null | Francesco/resnet152-224-1k | 1 | null | transformers | 27,979 | Entry not found |
Francesco/resnet18-224-1k | 0b691e7f254db5b1a35788b2722c72e4fbe48820 | 2022-02-23T11:49:32.000Z | [
"pytorch",
"resnet",
"image-classification",
"transformers"
] | image-classification | false | Francesco | null | Francesco/resnet18-224-1k | 1 | null | transformers | 27,980 | Entry not found |
Francesco/resnet34-224-1k | 6ca0fcd4f17c3ab2f13106c01fafd76e219a8817 | 2022-02-23T11:50:32.000Z | [
"pytorch",
"resnet",
"image-classification",
"transformers"
] | image-classification | false | Francesco | null | Francesco/resnet34-224-1k | 1 | null | transformers | 27,981 | Entry not found |
Francesco/resnet50-224-1k | 5e8fd13e66c63caadd8f7acabe4a84252999dc51 | 2022-02-23T11:51:05.000Z | [
"pytorch",
"resnet",
"image-classification",
"transformers"
] | image-classification | false | Francesco | null | Francesco/resnet50-224-1k | 1 | null | transformers | 27,982 | Entry not found |
GKLMIP/electra-tagalog-base-uncased | 19124b0217c2e8a08b6d0729f612603dedd84e34 | 2021-07-31T02:14:00.000Z | [
"pytorch",
"electra",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | GKLMIP | null | GKLMIP/electra-tagalog-base-uncased | 1 | null | transformers | 27,983 | https://github.com/GKLMIP/Pretrained-Models-For-Tagalog
If you use our model, please consider citing our paper:
```
@InProceedings{,
author="Jiang, Shengyi
and Fu, Yingwen
and Lin, Xiaotian
and Lin, Nankai",
title="Pre-trained Language models for Tagalog with Multi-source data",
booktitle="Natural Language Processing and Chinese Computing",
year="2021",
publisher="Springer International Publishing",
address="Cham",
}
``` |
GPL/bioasq-1m-msmarco-distilbert-gpl | 92084c813b36ebb8637dbd8a4b70efff5fa2b823 | 2022-04-19T15:18:19.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/bioasq-1m-msmarco-distilbert-gpl | 1 | null | sentence-transformers | 27,984 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# GPL/bioasq-1m-msmarco-distilbert-gpl
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('GPL/bioasq-1m-msmarco-distilbert-gpl')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('GPL/bioasq-1m-msmarco-distilbert-gpl')
model = AutoModel.from_pretrained('GPL/bioasq-1m-msmarco-distilbert-gpl')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=GPL/bioasq-1m-msmarco-distilbert-gpl)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
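To the best of our reading of the GPL toolkit, this loss distills margins from a cross-encoder teacher into the bi-encoder student: for a query $q$ with positive passage $p^+$ and negative $p^-$, it minimizes the mean squared error between the student and teacher margins,

$$\mathcal{L} = \left( \big(f(q) \cdot f(p^+) - f(q) \cdot f(p^-)\big) - \big(CE(q, p^+) - CE(q, p^-)\big) \right)^2$$

where $f$ is the bi-encoder embedding and $CE$ is the cross-encoder score.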
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
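For retrieval, query and corpus embeddings can be compared directly. A minimal sketch using the sentence-transformers utilities follows; the texts are illustrative, and `util.dot_score` may be preferable if the model was trained with a dot-product objective:
```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("GPL/bioasq-1m-msmarco-distilbert-gpl")
query_emb = model.encode("What is the function of the BRCA1 gene?", convert_to_tensor=True)
corpus_emb = model.encode([
    "BRCA1 is a tumor suppressor gene involved in DNA repair.",
    "The mitochondrion is the powerhouse of the cell.",
], convert_to_tensor=True)

# Rank corpus passages by cosine similarity to the query.
print(util.cos_sim(query_emb, corpus_emb))
```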
## Citing & Authors
<!--- Describe where people can find more information --> |
GPL/cqadupstack-tsdae-msmarco-distilbert-margin-mse | d8d18b60e43263c848d904fa201737eecaa4c99d | 2022-04-19T16:50:27.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | GPL | null | GPL/cqadupstack-tsdae-msmarco-distilbert-margin-mse | 1 | null | transformers | 27,985 | Entry not found |
GPL/fiqa-tsdae-msmarco-distilbert-gpl | a81c04f3a52c0d29dcea52ee3587e27aca60ce55 | 2022-04-19T15:28:28.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/fiqa-tsdae-msmarco-distilbert-gpl | 1 | null | sentence-transformers | 27,986 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# GPL/fiqa-tsdae-msmarco-distilbert-gpl
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('GPL/fiqa-tsdae-msmarco-distilbert-gpl')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('GPL/fiqa-tsdae-msmarco-distilbert-gpl')
model = AutoModel.from_pretrained('GPL/fiqa-tsdae-msmarco-distilbert-gpl')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=GPL/fiqa-tsdae-msmarco-distilbert-gpl)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
GPL/robust04-tsdae-msmarco-distilbert-gpl | 0f45643680b23cfc1ed38874650cd30f317af952 | 2022-04-19T16:30:20.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/robust04-tsdae-msmarco-distilbert-gpl | 1 | null | sentence-transformers | 27,987 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# GPL/robust04-tsdae-msmarco-distilbert-gpl
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('GPL/robust04-tsdae-msmarco-distilbert-gpl')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('GPL/robust04-tsdae-msmarco-distilbert-gpl')
model = AutoModel.from_pretrained('GPL/robust04-tsdae-msmarco-distilbert-gpl')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=GPL/robust04-tsdae-msmarco-distilbert-gpl)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
GPL/robust04-tsdae-msmarco-distilbert-margin-mse | 869e79fffe5275bedbb1d921212a7dcdfdcd2541 | 2022-04-19T16:50:54.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | GPL | null | GPL/robust04-tsdae-msmarco-distilbert-margin-mse | 1 | null | transformers | 27,988 | Entry not found |
GPL/trec-covid-v2-msmarco-distilbert-gpl | 139f349a8d9cf8bb2b7ef6548e06ea60d83f122e | 2022-04-19T15:18:49.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/trec-covid-v2-msmarco-distilbert-gpl | 1 | null | sentence-transformers | 27,989 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# GPL/trec-covid-v2-msmarco-distilbert-gpl
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('GPL/trec-covid-v2-msmarco-distilbert-gpl')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('GPL/trec-covid-v2-msmarco-distilbert-gpl')
model = AutoModel.from_pretrained('GPL/trec-covid-v2-msmarco-distilbert-gpl')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=GPL/trec-covid-v2-msmarco-distilbert-gpl)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
GabbyDaBUNBUN/DialoGPT-medium-PinkiePie | 046198fae3aa399819b493633891cf6acc2a0285 | 2022-02-02T03:24:51.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational",
"license:mit"
] | conversational | false | GabbyDaBUNBUN | null | GabbyDaBUNBUN/DialoGPT-medium-PinkiePie | 1 | null | transformers | 27,990 | ---
tags:
- conversational
license: mit
---
# Pinkie Pie Chatbot
Adapted from r3dhummingbird's work! |
GammaPTest/e_bot | ff943ab33d9af7edd356585cfcd2a0dc80234439 | 2021-11-19T18:29:45.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | GammaPTest | null | GammaPTest/e_bot | 1 | null | transformers | 27,991 | This be a test |
Gantenbein/ADDI-CH-GPT2 | 46c71055abbc91afd11bc3673e7ab0e44d6fba5e | 2021-06-02T13:58:54.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Gantenbein | null | Gantenbein/ADDI-CH-GPT2 | 1 | null | transformers | 27,992 | |
Gantenbein/ADDI-CH-XLM-R | 3f24058f07debc1bdb3868b3e096b9b0d0defffb | 2021-06-01T13:55:25.000Z | [
"pytorch",
"xlm-roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Gantenbein | null | Gantenbein/ADDI-CH-XLM-R | 1 | null | transformers | 27,993 | Entry not found |
Gantenbein/ADDI-DE-GPT2 | 6b306a810f2691b4e7b43a801925c63d9ca0f470 | 2021-06-01T14:29:34.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Gantenbein | null | Gantenbein/ADDI-DE-GPT2 | 1 | null | transformers | 27,994 | Entry not found |
Gantenbein/ADDI-DE-RoBERTa | be7917ef9d473820e583ffcaf992de2acb4a423f | 2021-06-01T14:30:17.000Z | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Gantenbein | null | Gantenbein/ADDI-DE-RoBERTa | 1 | null | transformers | 27,995 | Entry not found |
Gantenbein/ADDI-FI-XLM-R | 1c41a0ccec9d9c5fd88a1c4c67c942aa932e3999 | 2021-06-01T14:12:53.000Z | [
"pytorch",
"xlm-roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Gantenbein | null | Gantenbein/ADDI-FI-XLM-R | 1 | null | transformers | 27,996 | Entry not found |
Gantenbein/ADDI-FR-GPT2 | fc2f2020265e84b34612755ae73a37e2c9b893a5 | 2021-06-01T14:07:50.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Gantenbein | null | Gantenbein/ADDI-FR-GPT2 | 1 | null | transformers | 27,997 | Entry not found |
Gantenbein/ADDI-FR-RoBERTa | 1b9ac13ca64c19bff9fa9ccde085942846bc5468 | 2021-06-01T14:07:22.000Z | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Gantenbein | null | Gantenbein/ADDI-FR-RoBERTa | 1 | null | transformers | 27,998 | Entry not found |
Gantenbein/ADDI-FR-XLM-R | 9c4bf69d2235d91c669f9733cdda32e298a730a8 | 2021-06-01T14:06:53.000Z | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Gantenbein | null | Gantenbein/ADDI-FR-XLM-R | 1 | null | transformers | 27,999 | Entry not found |