Dataset schema (one row per Hub model):

| Column | Dtype | Length / range / classes |
|---|---|---|
| modelId | string | length 4-112 |
| sha | string | length 40 |
| lastModified | string | length 24 |
| tags | list | |
| pipeline_tag | string | 29 classes |
| private | bool | 1 class |
| author | string | length 2-38 |
| config | null | |
| id | string | length 4-112 |
| downloads | float64 | 0 to 36.8M |
| likes | float64 | 0 to 712 |
| library_name | string | 17 classes |
| __index_level_0__ | int64 | 0 to 38.5k |
| readme | string | length 0-186k |
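The records below can be loaded and filtered programmatically. A minimal sketch, assuming the table has been published as a Hugging Face dataset; the repository id `user/hub-model-metadata` is a placeholder, not a real dataset:

```python
from datasets import load_dataset  # pip install datasets

# Placeholder repository id -- substitute the actual dataset location.
ds = load_dataset("user/hub-model-metadata", split="train")

# Keep text-classification models whose README was actually captured.
subset = ds.filter(
    lambda row: row["pipeline_tag"] == "text-classification"
    and row["readme"] not in ("", "Entry not found")
)

for row in subset.select(range(3)):
    print(row["modelId"], int(row["downloads"]), row["likes"])
```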
The fourteen `connectivity` checkpoints below share all metadata apart from the tabulated fields: pipeline_tag text-classification, private false, config null, downloads 5, likes null, library_name transformers, readme "Entry not found", and tags [ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ] (the `cola_6ep_ft-0` row omits "tensorboard"). In every record in this section, `id` repeats `modelId`, so it is not listed separately.

| modelId | sha | lastModified | __index_level_0__ |
|---|---|---|---|
| connectivity/bert_ft_qqp-40 | abf026bd67ed4b613aa276c0a22b7edcad228e5c | 2022-05-21T16:34:04.000Z | 17,300 |
| connectivity/bert_ft_qqp-44 | 906eb3c0ece122a7f1b1c7b074d8bf766b438222 | 2022-05-21T16:34:20.000Z | 17,301 |
| connectivity/bert_ft_qqp-45 | 93ffed2aa5df12c57936295d6afa28923598cc2d | 2022-05-21T16:34:24.000Z | 17,302 |
| connectivity/bert_ft_qqp-48 | 6ae8f8025c8f81d4ab1b0d1b87e1fc0de641b3fb | 2022-05-21T16:34:40.000Z | 17,303 |
| connectivity/bert_ft_qqp-66 | 21a916f1b46a0a74e95c40528efd9ed6a9569c1b | 2022-05-21T16:36:07.000Z | 17,304 |
| connectivity/bert_ft_qqp-70 | 591019ccdeab0f7568e9d2ee2699da86b5ee3504 | 2022-05-21T16:36:22.000Z | 17,305 |
| connectivity/cola_6ep_ft-0 | caecbf251c584748c352c8dbd246058c1480f1cc | 2022-05-21T16:43:32.000Z | 17,306 |
| connectivity/bert_ft_qqp-80 | fef608e9fd1a539c35b59abc2e83cb4a76abc498 | 2022-05-21T16:37:12.000Z | 17,307 |
| connectivity/bert_ft_qqp-81 | bff58c96bdd52fe67de97ecf9b11605d6570fc08 | 2022-05-21T16:37:18.000Z | 17,308 |
| connectivity/bert_ft_qqp-83 | c556fc5ca0520a74ffd3bc8fb352fb000a5eb14e | 2022-05-21T16:37:27.000Z | 17,309 |
| connectivity/bert_ft_qqp-86 | e0ea3e7ecec717d45ce11f92983a44aedbe18316 | 2022-05-21T16:37:39.000Z | 17,310 |
| connectivity/bert_ft_qqp-90 | 01a81817890d156efbe7d7527f43adfaa996e7c5 | 2022-05-21T16:37:53.000Z | 17,311 |
| connectivity/bert_ft_qqp-96 | 793e9dc23322e32a45a98ab973647744d4bff979 | 2022-05-21T16:38:22.000Z | 17,312 |
| connectivity/bert_ft_qqp-97 | 50dbbc1cea57ab6f4383292d0182597835e3376d | 2022-05-21T16:38:25.000Z | 17,313 |
The next three records likewise share private false, config null, downloads 5, likes null, library_name transformers, and readme "Entry not found":

| modelId | sha | lastModified | tags | pipeline_tag | author | __index_level_0__ |
|---|---|---|---|---|---|---|
| prodm93/rn_gpt2_customdata_model.json | 278806bd4e25689766f8c57dffc022a1e05967c9 | 2022-05-21T17:26:52.000Z | pytorch, gpt2, text-generation, transformers | text-generation | prodm93 | 17,314 |
| prodm93/T5Dynamic_title_model_v2 | ec9e93db9cc0bafa689f8c86cfaad1cd21917446 | 2022-05-21T22:25:48.000Z | pytorch, t5, text2text-generation, transformers, autotrain_compatible | text2text-generation | prodm93 | 17,315 |
| SamuelMiller/qa_squad | 3bdc643db90ae549fa231820d631d316056f89ec | 2022-05-22T03:13:46.000Z | pytorch, t5, text2text-generation, transformers, autotrain_compatible | text2text-generation | SamuelMiller | 17,316 |
**ocm/finetuning-sentiment-model-3000-samples** · sha `372361761b6e257d6d1a351c16c9d72958b37bfa` · lastModified 2022-05-22T16:12:20.000Z · pipeline_tag text-classification · private false · author ocm · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,317
tags: [ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:imdb", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imdb
metrics:
- accuracy
- f1
model-index:
- name: finetuning-sentiment-model-3000-samples
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: imdb
      type: imdb
      args: plain_text
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.8766666666666667
    - name: F1
      type: f1
      value: 0.877887788778878
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuning-sentiment-model-3000-samples

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set:
- Loss: 0.3107
- Accuracy: 0.8767
- F1: 0.8779

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
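Cards like the one above are emitted by `transformers.Trainer`. As a rough, hypothetical reconstruction (not the author's actual script), a run with the listed hyperparameters could be set up as follows; the 3,000-sample IMDB subset and the column handling are assumptions inferred from the model name:

```python
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2)

# "3000 samples" suggests a small IMDB subset; the exact split is an assumption.
imdb = load_dataset("imdb")
train = imdb["train"].shuffle(seed=42).select(range(3000))
train = train.map(lambda b: tokenizer(b["text"], truncation=True), batched=True)

args = TrainingArguments(
    output_dir="finetuning-sentiment-model-3000-samples",
    learning_rate=2e-5,              # matches the card
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=2,
    seed=42,                         # Adam betas/epsilon and the linear
)                                    # schedule are the Trainer defaults

trainer = Trainer(model=model, args=args, train_dataset=train, tokenizer=tokenizer)
trainer.train()
```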
**viviastaari/finetuning-sentiment-analysis-en** · sha `c1eaf3837e74d5ac4cbe9b884424eee762dac4da` · lastModified 2022-05-23T03:04:12.000Z · pipeline_tag text-classification · private false · author viviastaari · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,318
tags: [ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
- precision
- recall
model-index:
- name: finetuning-sentiment-analysis-en
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuning-sentiment-analysis-en

This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 0.0792
- Accuracy: 0.9803
- F1: 0.9856
- Precision: 0.9875
- Recall: 0.9837

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
|---|---|---|---|---|---|---|---|
| 0.426 | 1.0 | 1408 | 0.2718 | 0.8910 | 0.9201 | 0.9251 | 0.9151 |
| 0.3247 | 2.0 | 2816 | 0.1552 | 0.9540 | 0.9665 | 0.9656 | 0.9674 |
| 0.1582 | 3.0 | 4224 | 0.0792 | 0.9803 | 0.9856 | 0.9875 | 0.9837 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
**roschmid/distilbert-base-uncased-finetuned-ner** · sha `1b1cdae335f687be6c4073109d65fa16aaf8886a` · lastModified 2022-05-23T09:23:51.000Z · pipeline_tag token-classification · private false · author roschmid · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,319
tags: [ "pytorch", "tensorboard", "distilbert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-ner
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: conll2003
      type: conll2003
      args: conll2003
    metrics:
    - name: Precision
      type: precision
      value: 0.920704845814978
    - name: Recall
      type: recall
      value: 0.9352276541000112
    - name: F1
      type: f1
      value: 0.927909428936123
    - name: Accuracy
      type: accuracy
      value: 0.9831604365577391
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-ner

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset. It achieves the following results on the evaluation set:
- Loss: 0.0631
- Precision: 0.9207
- Recall: 0.9352
- F1: 0.9279
- Accuracy: 0.9832

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|---|---|---|---|---|---|---|---|
| 0.2399 | 1.0 | 878 | 0.0678 | 0.9097 | 0.9211 | 0.9154 | 0.9804 |
| 0.0502 | 2.0 | 1756 | 0.0628 | 0.9152 | 0.9320 | 0.9235 | 0.9820 |
| 0.0299 | 3.0 | 2634 | 0.0631 | 0.9207 | 0.9352 | 0.9279 | 0.9832 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
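A fine-tuned token-classification checkpoint like this one can be queried through the `pipeline` API. A minimal sketch; the aggregation strategy is a common choice, not something the card specifies:

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="roschmid/distilbert-base-uncased-finetuned-ner",
    aggregation_strategy="simple",  # merge word pieces into whole entities
)

for entity in ner("Hugging Face is based in New York City."):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```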
**coreybrady/bert-emotion** · sha `5f1f35fe124f41db1d14e0659f94bf03b5d4bffc` · lastModified 2022-05-23T15:16:26.000Z · pipeline_tag text-classification · private false · author coreybrady · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,320
tags: [ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:tweet_eval", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- tweet_eval
metrics:
- precision
- recall
model-index:
- name: bert-emotion
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: tweet_eval
      type: tweet_eval
      args: emotion
    metrics:
    - name: Precision
      type: precision
      value: 0.7262254187805659
    - name: Recall
      type: recall
      value: 0.725549671319356
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-emotion

This model is a fine-tuned version of [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) on the tweet_eval dataset. It achieves the following results on the evaluation set:
- Loss: 1.1670
- Precision: 0.7262
- Recall: 0.7255
- Fscore: 0.7253

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | Fscore |
|---|---|---|---|---|---|---|
| 0.8561 | 1.0 | 815 | 0.7844 | 0.7575 | 0.6081 | 0.6253 |
| 0.5337 | 2.0 | 1630 | 0.9080 | 0.7567 | 0.7236 | 0.7325 |
| 0.2573 | 3.0 | 2445 | 1.1670 | 0.7262 | 0.7255 | 0.7253 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
**SI2M-Lab/DarijaBERT-mix** · sha `56bcb42b9054ea5a69f3d1af964bb7eefe0bc1fb` · lastModified 2022-05-24T09:04:15.000Z · pipeline_tag null · private false · author SI2M-Lab · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,321
tags: [ "pytorch", "bert", "transformers" ]
README: Entry not found
**Sebabrata/lmv2-2022-05-24** · sha `f1dd3425e8be23a24ab2f2ef8b102f84ae53c1e0` · lastModified 2022-05-24T10:15:14.000Z · pipeline_tag token-classification · private false · author Sebabrata · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,322
tags: [ "pytorch", "tensorboard", "layoutlmv2", "token-classification", "transformers", "generated_from_trainer", "license:cc-by-nc-sa-4.0", "model-index", "autotrain_compatible" ]

README:

---
license: cc-by-nc-sa-4.0
tags:
- generated_from_trainer
model-index:
- name: lmv2-2022-05-24
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# lmv2-2022-05-24

This model is a fine-tuned version of [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 0.0484
- Address Precision: 0.9474
- Address Recall: 1.0
- Address F1: 0.9730
- Address Number: 18
- Business Name Precision: 1.0
- Business Name Recall: 1.0
- Business Name F1: 1.0
- Business Name Number: 13
- City State Zip Code Precision: 0.8947
- City State Zip Code Recall: 0.8947
- City State Zip Code F1: 0.8947
- City State Zip Code Number: 19
- Ein Precision: 1.0
- Ein Recall: 1.0
- Ein F1: 1.0
- Ein Number: 4
- List Account Number Precision: 0.6
- List Account Number Recall: 0.75
- List Account Number F1: 0.6667
- List Account Number Number: 4
- Name Precision: 1.0
- Name Recall: 0.9444
- Name F1: 0.9714
- Name Number: 18
- Ssn Precision: 1.0
- Ssn Recall: 1.0
- Ssn F1: 1.0
- Ssn Number: 8
- Overall Precision: 0.9412
- Overall Recall: 0.9524
- Overall F1: 0.9467
- Overall Accuracy: 0.9979

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 4e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: constant
- num_epochs: 30

### Training results

| Training Loss | Epoch | Step | Validation Loss | Address Precision | Address Recall | Address F1 | Address Number | Business Name Precision | Business Name Recall | Business Name F1 | Business Name Number | City State Zip Code Precision | City State Zip Code Recall | City State Zip Code F1 | City State Zip Code Number | Ein Precision | Ein Recall | Ein F1 | Ein Number | List Account Number Precision | List Account Number Recall | List Account Number F1 | List Account Number Number | Name Precision | Name Recall | Name F1 | Name Number | Ssn Precision | Ssn Recall | Ssn F1 | Ssn Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 1.9388 | 1.0 | 79 | 1.5568 | 0.0 | 0.0 | 0.0 | 18 | 0.0 | 0.0 | 0.0 | 13 | 0.0 | 0.0 | 0.0 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 18 | 0.0 | 0.0 | 0.0 | 8 | 0.0 | 0.0 | 0.0 | 0.9465 |
| 1.3777 | 2.0 | 158 | 1.1259 | 0.0 | 0.0 | 0.0 | 18 | 0.0 | 0.0 | 0.0 | 13 | 0.0 | 0.0 | 0.0 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 18 | 0.0 | 0.0 | 0.0 | 8 | 0.0 | 0.0 | 0.0 | 0.9465 |
| 0.9629 | 3.0 | 237 | 0.7497 | 0.0 | 0.0 | 0.0 | 18 | 0.0 | 0.0 | 0.0 | 13 | 0.0 | 0.0 | 0.0 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 18 | 0.0 | 0.0 | 0.0 | 8 | 0.0 | 0.0 | 0.0 | 0.9465 |
| 0.6292 | 4.0 | 316 | 0.4818 | 0.0 | 0.0 | 0.0 | 18 | 0.0 | 0.0 | 0.0 | 13 | 0.0 | 0.0 | 0.0 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 18 | 0.1944 | 0.875 | 0.3182 | 8 | 0.1944 | 0.0833 | 0.1167 | 0.9523 |
| 0.3952 | 5.0 | 395 | 0.2982 | 0.2424 | 0.8889 | 0.3810 | 18 | 0.0 | 0.0 | 0.0 | 13 | 0.1111 | 0.1053 | 0.1081 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 4 | 0.0 | 0.0 | 0.0 | 18 | 0.6364 | 0.875 | 0.7368 | 8 | 0.2632 | 0.2976 | 0.2793 | 0.9660 |
| 0.2675 | 6.0 | 474 | 0.2183 | 1.0 | 0.9444 | 0.9714 | 18 | 0.0 | 0.0 | 0.0 | 13 | 0.8824 | 0.7895 | 0.8333 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 0.1905 | 0.4444 | 0.2667 | 18 | 0.5714 | 1.0 | 0.7273 | 8 | 0.5204 | 0.6071 | 0.5604 | 0.9810 |
| 0.2095 | 7.0 | 553 | 0.1990 | 1.0 | 0.9444 | 0.9714 | 18 | 0.0833 | 0.0769 | 0.08 | 13 | 0.9375 | 0.7895 | 0.8571 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.75 | 0.75 | 0.75 | 4 | 0.2647 | 0.5 | 0.3462 | 18 | 0.1739 | 1.0 | 0.2963 | 8 | 0.4109 | 0.6310 | 0.4977 | 0.9762 |
| 0.1928 | 8.0 | 632 | 0.1704 | 1.0 | 0.9444 | 0.9714 | 18 | 0.3158 | 0.4615 | 0.3750 | 13 | 0.9412 | 0.8421 | 0.8889 | 19 | 0.0 | 0.0 | 0.0 | 4 | 1.0 | 0.75 | 0.8571 | 4 | 0.3214 | 0.5 | 0.3913 | 18 | 0.5385 | 0.875 | 0.6667 | 8 | 0.5979 | 0.6905 | 0.6409 | 0.9849 |
| 0.159 | 9.0 | 711 | 0.1339 | 1.0 | 0.9444 | 0.9714 | 18 | 0.45 | 0.6923 | 0.5455 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.25 | 0.75 | 0.375 | 4 | 0.375 | 0.5 | 0.4286 | 18 | 0.2308 | 0.375 | 0.2857 | 8 | 0.5577 | 0.6905 | 0.6170 | 0.9871 |
| 0.1314 | 10.0 | 790 | 0.1199 | 0.9444 | 0.9444 | 0.9444 | 18 | 0.8571 | 0.9231 | 0.8889 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 0.7895 | 0.8333 | 0.8108 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.8372 | 0.8571 | 0.8471 | 0.9897 |
| 0.1143 | 11.0 | 869 | 0.1127 | 0.9444 | 0.9444 | 0.9444 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.9036 | 0.8929 | 0.8982 | 0.9903 |
| 0.1037 | 12.0 | 948 | 0.1039 | 0.85 | 0.9444 | 0.8947 | 18 | 0.9167 | 0.8462 | 0.8800 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 0.8889 | 0.8889 | 0.8889 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.8471 | 0.8571 | 0.8521 | 0.9901 |
| 0.0925 | 13.0 | 1027 | 0.1124 | 1.0 | 0.9444 | 0.9714 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.75 | 0.75 | 0.75 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 0.5833 | 0.875 | 0.7000 | 8 | 0.9136 | 0.8810 | 0.8970 | 0.9904 |
| 0.0863 | 14.0 | 1106 | 0.1077 | 0.9444 | 0.9444 | 0.9444 | 18 | 0.7333 | 0.8462 | 0.7857 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 0.6154 | 1.0 | 0.7619 | 8 | 0.8488 | 0.8690 | 0.8588 | 0.9916 |
| 0.0845 | 15.0 | 1185 | 0.1035 | 0.9444 | 0.9444 | 0.9444 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9412 | 0.8421 | 0.8889 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 0.5833 | 0.875 | 0.7000 | 8 | 0.8902 | 0.8690 | 0.8795 | 0.9921 |
| 0.0735 | 16.0 | 1264 | 0.0866 | 0.6667 | 0.8889 | 0.7619 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.8315 | 0.8810 | 0.8555 | 0.9918 |
| 0.0714 | 17.0 | 1343 | 0.0781 | 0.9444 | 0.9444 | 0.9444 | 18 | 1.0 | 0.9231 | 0.9600 | 13 | 0.9412 | 0.8421 | 0.8889 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.9012 | 0.8690 | 0.8848 | 0.9921 |
| 0.0656 | 18.0 | 1422 | 0.0816 | 0.8947 | 0.9444 | 0.9189 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 0.9444 | 0.9444 | 0.9444 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.8824 | 0.8929 | 0.8876 | 0.9919 |
| 0.0602 | 19.0 | 1501 | 0.0770 | 0.8 | 0.8889 | 0.8421 | 18 | 0.8667 | 1.0 | 0.9286 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 0.9444 | 0.9444 | 0.9444 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.8409 | 0.8810 | 0.8605 | 0.9912 |
| 0.0516 | 20.0 | 1580 | 0.0710 | 0.8095 | 0.9444 | 0.8718 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.8721 | 0.8929 | 0.8824 | 0.9919 |
| 0.0475 | 21.0 | 1659 | 0.0686 | 0.6667 | 1.0 | 0.8 | 18 | 0.5 | 0.6154 | 0.5517 | 13 | 0.9412 | 0.8421 | 0.8889 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 0.9412 | 0.8889 | 0.9143 | 18 | 0.6667 | 1.0 | 0.8 | 8 | 0.7340 | 0.8214 | 0.7753 | 0.9904 |
| 0.0431 | 22.0 | 1738 | 0.0715 | 0.8095 | 0.9444 | 0.8718 | 18 | 0.9286 | 1.0 | 0.9630 | 13 | 0.8421 | 0.8421 | 0.8421 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.75 | 0.75 | 0.75 | 4 | 0.9444 | 0.9444 | 0.9444 | 18 | 0.3529 | 0.75 | 0.48 | 8 | 0.7273 | 0.8571 | 0.7869 | 0.9933 |
| 0.0383 | 23.0 | 1817 | 0.0627 | 0.8947 | 0.9444 | 0.9189 | 18 | 0.9231 | 0.9231 | 0.9231 | 13 | 0.8947 | 0.8947 | 0.8947 | 19 | 0.0 | 0.0 | 0.0 | 4 | 0.75 | 0.75 | 0.75 | 4 | 1.0 | 0.8889 | 0.9412 | 18 | 0.5714 | 1.0 | 0.7273 | 8 | 0.8111 | 0.8690 | 0.8391 | 0.9961 |
| 0.0327 | 24.0 | 1896 | 0.0683 | 0.8095 | 0.9444 | 0.8718 | 18 | 0.6 | 0.9231 | 0.7273 | 13 | 0.8095 | 0.8947 | 0.8500 | 19 | 0.6 | 0.75 | 0.6667 | 4 | 0.75 | 0.75 | 0.75 | 4 | 0.9412 | 0.8889 | 0.9143 | 18 | 0.8889 | 1.0 | 0.9412 | 8 | 0.7835 | 0.9048 | 0.8398 | 0.9942 |
| 0.0292 | 25.0 | 1975 | 0.0674 | 0.8947 | 0.9444 | 0.9189 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.85 | 0.8947 | 0.8718 | 19 | 1.0 | 1.0 | 1.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 1.0 | 1.0 | 1.0 | 8 | 0.9186 | 0.9405 | 0.9294 | 0.9975 |
| 0.0269 | 26.0 | 2054 | 0.0691 | 0.85 | 0.9444 | 0.8947 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 1.0 | 1.0 | 1.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 1.0 | 1.0 | 1.0 | 8 | 0.9294 | 0.9405 | 0.9349 | 0.9976 |
| 0.024 | 27.0 | 2133 | 0.0484 | 0.9474 | 1.0 | 0.9730 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.8947 | 0.8947 | 0.8947 | 19 | 1.0 | 1.0 | 1.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 1.0 | 1.0 | 1.0 | 8 | 0.9412 | 0.9524 | 0.9467 | 0.9979 |
| 0.0221 | 28.0 | 2212 | 0.0619 | 0.85 | 0.9444 | 0.8947 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 1.0 | 1.0 | 1.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 1.0 | 1.0 | 1.0 | 8 | 0.9294 | 0.9405 | 0.9349 | 0.9976 |
| 0.0216 | 29.0 | 2291 | 0.0810 | 0.85 | 0.9444 | 0.8947 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 1.0 | 1.0 | 1.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 1.0 | 0.875 | 0.9333 | 8 | 0.9286 | 0.9286 | 0.9286 | 0.9960 |
| 0.0175 | 30.0 | 2370 | 0.0646 | 0.85 | 0.9444 | 0.8947 | 18 | 1.0 | 1.0 | 1.0 | 13 | 0.9444 | 0.8947 | 0.9189 | 19 | 1.0 | 1.0 | 1.0 | 4 | 0.6 | 0.75 | 0.6667 | 4 | 1.0 | 0.9444 | 0.9714 | 18 | 1.0 | 1.0 | 1.0 | 8 | 0.9294 | 0.9405 | 0.9349 | 0.9976 |

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
**kimcando/final_projects** · sha `ac80897b435c8632ce4126027941dd7b72f7141b` · lastModified 2022-05-24T12:57:22.000Z · pipeline_tag text-classification · private false · author kimcando · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,323
tags: [ "pytorch", "roberta", "text-classification", "transformers" ]
README: Entry not found
**ulyanaisaeva/udmurt-bert-base-uncased** · sha `804606c2ec23a3ad509bc3f633e91bc7337cc397` · lastModified 2022-05-30T18:18:07.000Z · pipeline_tag fill-mask · private false · author ulyanaisaeva · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,324
tags: [ "pytorch", "tensorboard", "bert", "fill-mask", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]

README:

---
tags:
- generated_from_trainer
model-index:
- name: vocab2-bert-base-multilingual-uncased-udm-tsa
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vocab2-bert-base-multilingual-uncased-udm-tsa

This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set:
- Loss: 4.8497

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 20
- eval_batch_size: 20
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|---|---|---|---|
| 7.3112 | 1.0 | 6419 | 6.1814 |
| 5.8524 | 2.0 | 12838 | 5.4075 |
| 5.3392 | 3.0 | 19257 | 5.0810 |
| 5.0958 | 4.0 | 25676 | 4.9015 |
| 4.9897 | 5.0 | 32095 | 4.8497 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
**anablasi/finqa_model** · sha `97c67dba169cfe91315c073d7c509e2107f29fe7` · lastModified 2022-05-24T21:54:39.000Z · pipeline_tag question-answering · private false · author anablasi · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,325
tags: [ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
README: Entry not found
**leander/bert-finetuned-ner** · sha `36f7572661dd18b3d43a6e75f792cced5ac7de85` · lastModified 2022-05-25T09:53:48.000Z · pipeline_tag token-classification · private false · author leander · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,326
tags: [ "pytorch", "tensorboard", "bert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-finetuned-ner
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: conll2003
      type: conll2003
      args: conll2003
    metrics:
    - name: Precision
      type: precision
      value: 0.9329479768786128
    - name: Recall
      type: recall
      value: 0.9506900033658701
    - name: F1
      type: f1
      value: 0.9417354338584647
    - name: Accuracy
      type: accuracy
      value: 0.987048919762171
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-finetuned-ner

This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset. It achieves the following results on the evaluation set:
- Loss: 0.0589
- Precision: 0.9329
- Recall: 0.9507
- F1: 0.9417
- Accuracy: 0.9870

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|---|---|---|---|---|---|---|---|
| 0.0867 | 1.0 | 1756 | 0.0639 | 0.9140 | 0.9386 | 0.9261 | 0.9831 |
| 0.0398 | 2.0 | 3512 | 0.0586 | 0.9326 | 0.9480 | 0.9402 | 0.9858 |
| 0.0212 | 3.0 | 5268 | 0.0589 | 0.9329 | 0.9507 | 0.9417 | 0.9870 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
**aliosm/sha3bor-footer-101-arabertv02-base** · sha `8e9f13b976fa811c89157b30e88a9f57d7de897a` · lastModified 2022-05-28T09:35:18.000Z · pipeline_tag text-classification · private false · author aliosm · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,327
tags: [ "pytorch", "bert", "text-classification", "ar", "transformers", "license:mit" ]

README (frontmatter only; the widget examples are Arabic verse and are kept verbatim as data values):

---
language: ar
license: mit
widget:
- text: "إن العيون التي في طرفها حور"
- text: "إذا ما فعلت الخير ضوعف شرهم"
- text: "واحر قلباه ممن قلبه شبم"
---
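This card ships only widget examples. A hedged sketch of the equivalent programmatic call, reusing those same verses; the card does not document the label set, so the meaning of the returned labels is unspecified:

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="aliosm/sha3bor-footer-101-arabertv02-base",
)

# The same verses the card exposes as widget examples.
verses = [
    "إن العيون التي في طرفها حور",
    "إذا ما فعلت الخير ضوعف شرهم",
    "واحر قلباه ممن قلبه شبم",
]
print(classifier(verses))  # label semantics are not documented in the card
```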
**rainbow/distilbert-base-uncased-finetuned-emotion** · sha `22977cc44f85cbed6df36e75d5a8b7aafcf65c9c` · lastModified 2022-05-27T11:00:53.000Z · pipeline_tag text-classification · private false · author rainbow · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,328
tags: [ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers" ]
README: Entry not found

**Abdelrahman-Rezk/bert-base-arabic-camelbert-mix-poetry-finetuned-qawaf** · sha `f3de8d4523971335dea5c163c307b31c5a727e86` · lastModified 2022-05-27T21:12:10.000Z · pipeline_tag text-classification · private false · author Abdelrahman-Rezk · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,329
tags: [ "pytorch", "bert", "text-classification", "transformers" ]
README: Entry not found
**bookpanda/wangchanberta-base-att-spm-uncased-finetuned-imdb** · sha `506ebc0ae4c54800cefafedd4434d4c1c68d098d` · lastModified 2022-06-09T18:17:16.000Z · pipeline_tag fill-mask · private false · author bookpanda · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,330
tags: [ "pytorch", "tensorboard", "camembert", "fill-mask", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]

README:

---
tags:
- generated_from_trainer
model-index:
- name: wangchanberta-base-att-spm-uncased-finetuned-imdb
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wangchanberta-base-att-spm-uncased-finetuned-imdb

This model is a fine-tuned version of [airesearch/wangchanberta-base-att-spm-uncased](https://huggingface.co/airesearch/wangchanberta-base-att-spm-uncased) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 0.0810

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|---|---|---|---|
| 0.1831 | 1.0 | 4826 | 0.1542 |
| 0.1 | 2.0 | 9652 | 0.1075 |
| 0.0946 | 3.0 | 14478 | 0.0443 |
| 0.0618 | 4.0 | 19304 | 0.0830 |
| 0.0783 | 5.0 | 24130 | 0.0810 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.11.0+cu113
- Datasets 1.17.0
- Tokenizers 0.10.3
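A usage sketch for a fill-mask checkpoint like this one. The English prompt is an assumption based on the model name's IMDB suffix; the base model is Thai-oriented, so substitute text in the fine-tuning language as appropriate:

```python
from transformers import pipeline

fill = pipeline(
    "fill-mask",
    model="bookpanda/wangchanberta-base-att-spm-uncased-finetuned-imdb",
)

# Fetch the mask token from the tokenizer rather than hard-coding it,
# since it differs between BERT ([MASK]) and RoBERTa-style (<mask>) models.
mask = fill.tokenizer.mask_token
for pred in fill(f"This movie was absolutely {mask}.")[:3]:
    print(pred["token_str"], round(pred["score"], 3))
```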
**Nithiwat/soda-berta** · sha `f871c30ca12b4c14bff46229c1b23f81900f1d24` · lastModified 2022-05-29T17:07:20.000Z · pipeline_tag text-classification · private false · author Nithiwat · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,331
tags: [ "pytorch", "camembert", "text-classification", "transformers" ]
README: Entry not found

**GENG/hubert_ls_2500** · sha `58a0fbea17d8aa35e8cac6ddc1b1e9cc34007626` · lastModified 2022-05-29T23:48:43.000Z · pipeline_tag automatic-speech-recognition · private false · author GENG · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,332
tags: [ "pytorch", "hubert", "automatic-speech-recognition", "transformers" ]
README: Entry not found

**GENG/hubert_ls_4500** · sha `63214ec194dc3d3c181d7973df3db0c018bc16b7` · lastModified 2022-05-30T00:51:48.000Z · pipeline_tag automatic-speech-recognition · private false · author GENG · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,333
tags: [ "pytorch", "hubert", "automatic-speech-recognition", "transformers" ]
README: Entry not found
**shafin/distilbert-similarity-b32** · sha `7b20763fe972f06e7a5c07d1d52dd9402741c260` · lastModified 2022-05-30T08:56:46.000Z · pipeline_tag sentence-similarity · private false · author shafin · config null · downloads 5 · likes null · library_name sentence-transformers · `__index_level_0__` 17,334
tags: [ "pytorch", "distilbert", "feature-extraction", "sentence-transformers", "sentence-similarity" ]

README:

---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---

# shafin/distilbert-similarity-b32

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 32 dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('shafin/distilbert-similarity-b32')
embeddings = model.encode(sentences)
print(embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=shafin/distilbert-similarity-b32)

## Training

The model was trained with the parameters:

**DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 9375 with parameters:

```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**: `sentence_transformers.losses.OnlineContrastiveLoss.OnlineContrastiveLoss`

Parameters of the fit()-Method:

```
{
    "epochs": 15,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 3000,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: DistilBertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
  (2): Dense({'in_features': 768, 'out_features': 256, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
  (3): Dense({'in_features': 256, 'out_features': 32, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
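A natural follow-up to the card's usage snippet is scoring similarity between the resulting 32-dimensional embeddings; a small sketch using `sentence_transformers.util`:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("shafin/distilbert-similarity-b32")
emb = model.encode(
    ["This is an example sentence", "Each sentence is converted"],
    convert_to_tensor=True,
)

# Cosine similarity between the two 32-dimensional embeddings.
print(util.cos_sim(emb[0], emb[1]).item())
```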
**VanessaSchenkel/mbart-large-50-finetuned-opus-en-pt-translation-finetuned-en-to-pt-dataset-opus-books** · sha `2a05c65c692041ababd2b6851d60c97bd7419bd6` · lastModified 2022-05-30T16:38:08.000Z · pipeline_tag text2text-generation · private false · author VanessaSchenkel · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,335
tags: [ "pytorch", "tensorboard", "mbart", "text2text-generation", "dataset:opus_books", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]

README:

---
tags:
- generated_from_trainer
datasets:
- opus_books
model-index:
- name: mbart-large-50-finetuned-opus-en-pt-translation-finetuned-en-to-pt-dataset-opus-books
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mbart-large-50-finetuned-opus-en-pt-translation-finetuned-en-to-pt-dataset-opus-books

This model is a fine-tuned version of [Narrativa/mbart-large-50-finetuned-opus-en-pt-translation](https://huggingface.co/Narrativa/mbart-large-50-finetuned-opus-en-pt-translation) on the opus_books dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|---|---|---|---|---|---|
| No log | 1.0 | 79 | 1.5854 | 31.2219 | 26.9149 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
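For inference, mBART-50 checkpoints need explicit source and target language codes. A sketch assuming the standard mBART-50 codes `en_XX` and `pt_XX` apply to this checkpoint:

```python
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

ckpt = ("VanessaSchenkel/mbart-large-50-finetuned-opus-en-pt-translation"
        "-finetuned-en-to-pt-dataset-opus-books")
tokenizer = MBart50TokenizerFast.from_pretrained(ckpt, src_lang="en_XX")
model = MBartForConditionalGeneration.from_pretrained(ckpt)

inputs = tokenizer("The book is on the table.", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.lang_code_to_id["pt_XX"],  # target: Portuguese
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```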
**theojolliffe/bart-cnn-science-v3-e2** · sha `2504418b3f40457c11c14fad4f5a80c7f25c52fc` · lastModified 2022-05-30T21:42:44.000Z · pipeline_tag text2text-generation · private false · author theojolliffe · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,336
tags: [ "pytorch", "tensorboard", "bart", "text2text-generation", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]

README:

---
license: mit
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: bart-cnn-science-v3-e2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bart-cnn-science-v3-e2

This model is a fine-tuned version of [theojolliffe/bart-cnn-science](https://huggingface.co/theojolliffe/bart-cnn-science) on an unknown dataset. It achieves the following results on the evaluation set:
- Loss: 0.9352
- Rouge1: 52.5497
- Rouge2: 32.5507
- Rougel: 35.0014
- Rougelsum: 50.0575
- Gen Len: 141.5741

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|---|---|---|---|---|---|---|---|---|
| No log | 1.0 | 398 | 1.0023 | 52.0744 | 31.917 | 33.2804 | 49.6569 | 142.0 |
| 1.1851 | 2.0 | 796 | 0.9352 | 52.5497 | 32.5507 | 35.0014 | 50.0575 | 141.5741 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
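A summarization checkpoint like this one is typically driven through the `pipeline` API. In this sketch the input article and the generation lengths are illustrative assumptions; the card's Gen Len of roughly 141 suggests it produces long summaries:

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="theojolliffe/bart-cnn-science-v3-e2")

article = ("Researchers demonstrated a new battery chemistry that retains "
           "90 percent of its capacity after 2,000 charge cycles, using an "
           "electrolyte additive that suppresses dendrite growth.")

# max_length here is a guess informed by the card's ~141-token generations.
print(summarizer(article, max_length=142, min_length=30)[0]["summary_text"])
```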
**YeRyeongLee/electra-base-discriminator-finetuned-removed-0530** · sha `d220014b57fc8e789e0186f2ff0fdff9b903ac24` · lastModified 2022-05-31T10:46:25.000Z · pipeline_tag text-classification · private false · author YeRyeongLee · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,337
tags: [ "pytorch", "electra", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: electra-base-discriminator-finetuned-removed-0530
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# electra-base-discriminator-finetuned-removed-0530

This model is a fine-tuned version of [google/electra-base-discriminator](https://huggingface.co/google/electra-base-discriminator) on an unknown dataset. It achieves the following results on the evaluation set:
- Loss: 0.9713
- Accuracy: 0.8824
- F1: 0.8824

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|---|---|---|---|---|---|
| No log | 1.0 | 3180 | 0.6265 | 0.8107 | 0.8128 |
| No log | 2.0 | 6360 | 0.5158 | 0.8544 | 0.8541 |
| No log | 3.0 | 9540 | 0.6686 | 0.8563 | 0.8567 |
| No log | 4.0 | 12720 | 0.6491 | 0.8711 | 0.8709 |
| No log | 5.0 | 15900 | 0.8048 | 0.8660 | 0.8672 |
| No log | 6.0 | 19080 | 0.8110 | 0.8708 | 0.8710 |
| No log | 7.0 | 22260 | 1.0082 | 0.8651 | 0.8640 |
| 0.2976 | 8.0 | 25440 | 0.8343 | 0.8811 | 0.8814 |
| 0.2976 | 9.0 | 28620 | 0.9366 | 0.8780 | 0.8780 |
| 0.2976 | 10.0 | 31800 | 0.9713 | 0.8824 | 0.8824 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.9.0
- Datasets 1.16.1
- Tokenizers 0.12.1
**upsalite/bert-base-german-cased-finetuned-emotion** · sha `826a182b35d62165dfe2ddb2ec3af72007fec43f` · lastModified 2022-06-22T12:51:41.000Z · pipeline_tag text-classification · private false · author upsalite · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,338
tags: [ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]

README:

---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: bert-base-german-cased-finetuned-emotion
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-base-german-cased-finetuned-emotion

This model is a fine-tuned version of [bert-base-german-cased](https://huggingface.co/bert-base-german-cased) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 1.2345
- Accuracy: 0.6937
- F1: 0.6929

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|---|---|---|---|---|---|
| 1.9412 | 1.0 | 140 | 1.4481 | 0.5402 | 0.5229 |
| 1.1779 | 2.0 | 280 | 1.1625 | 0.6375 | 0.6350 |
| 0.7914 | 3.0 | 420 | 1.0541 | 0.6732 | 0.6700 |
| 0.5264 | 4.0 | 560 | 1.0504 | 0.6821 | 0.6803 |
| 0.344 | 5.0 | 700 | 1.0638 | 0.6884 | 0.6853 |
| 0.2187 | 6.0 | 840 | 1.1309 | 0.6964 | 0.6945 |
| 0.1387 | 7.0 | 980 | 1.1504 | 0.7009 | 0.6986 |
| 0.0988 | 8.0 | 1120 | 1.2012 | 0.6964 | 0.6944 |
| 0.0705 | 9.0 | 1260 | 1.2153 | 0.7009 | 0.7003 |
| 0.0571 | 10.0 | 1400 | 1.2345 | 0.6937 | 0.6929 |

### Framework versions

- Transformers 4.19.0
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.12.1
**dexay/reDs** · sha `0b591fbec56f01b965243bdeb326dc38d8b2f951` · lastModified 2022-05-31T13:02:48.000Z · pipeline_tag text-classification · private false · author dexay · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,339
tags: [ "pytorch", "bert", "text-classification", "transformers" ]
README: Entry not found

**StanKrewinkel/finetuning-sentiment-model-3000-samples** · sha `38e4186e992978c3a8c7bd8313e91e88a33dcce7` · lastModified 2022-06-02T07:06:19.000Z · pipeline_tag text-classification · private false · author StanKrewinkel · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,340
tags: [ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers" ]
README: Entry not found
**wuxiaofei/finetuning-sentiment-model-3000-samples** · sha `7dee0f8f4aa4e9a597822709fe5264b6a0fc0949` · lastModified 2022-05-31T15:12:52.000Z · pipeline_tag text-classification · private false · author wuxiaofei · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,341
tags: [ "pytorch", "distilbert", "text-classification", "dataset:imdb", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imdb
metrics:
- accuracy
- f1
model-index:
- name: finetuning-sentiment-model-3000-samples
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: imdb
      type: imdb
      args: plain_text
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.86
    - name: F1
      type: f1
      value: 0.8636363636363636
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuning-sentiment-model-3000-samples

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set:
- Loss: 0.6787
- Accuracy: 0.86
- F1: 0.8636

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu102
- Datasets 2.2.2
- Tokenizers 0.12.1
**oliverguhr/wav2vec2-large-xlsr-53-german** · sha `3bc9db99ad960e4be76eaa003f5b0e749591efca` · lastModified 2022-06-01T10:48:43.000Z · pipeline_tag automatic-speech-recognition · private false · author oliverguhr · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,342
tags: [ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "model-index" ]

README:

---
language:
- de
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_8_0
model-index:
- name: XLSR Wav2Vec2 Large German by Oliver Guhr
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice 8
      type: mozilla-foundation/common_voice_8_0
      args: de
    metrics:
    - name: Test WER
      type: wer
      value: 10.29
    - name: Test CER
      type: cer
      value: 2.51
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-large-xlsr-53-german-cv8-dropout-30epoch

This model is a fine-tuned version of [./wav2vec2-large-xlsr-53-german-cv8-dropout-30epoch](https://huggingface.co/./wav2vec2-large-xlsr-53-german-cv8-dropout-30epoch) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - DE dataset. It achieves the following results on the test set:
- Wer: 10.29%
- CER: 2.51%

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 20.0
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|---|---|---|---|---|
| 0.2081 | 1.0 | 6815 | 0.1784 | 0.1910 |
| 0.1686 | 2.0 | 13630 | 0.1621 | 0.1725 |
| 0.1515 | 3.0 | 20445 | 0.1569 | 0.1649 |
| 0.1426 | 4.0 | 27260 | 0.1466 | 0.1681 |
| 0.135 | 5.0 | 34075 | 0.1357 | 0.1410 |
| 0.1093 | 6.0 | 40890 | 0.1313 | 0.1436 |
| 0.1 | 7.0 | 47705 | 0.1242 | 0.1250 |
| 0.0999 | 8.0 | 54520 | 0.1191 | 0.1218 |
| 0.084 | 9.0 | 61335 | 0.1134 | 0.1164 |
| 0.0752 | 10.0 | 68150 | 0.1111 | 0.1117 |
| 0.0724 | 11.0 | 6815 | 0.1222 | 0.1206 |
| 0.0726 | 12.0 | 13630 | 0.1241 | 0.1247 |
| 0.0816 | 13.0 | 20445 | 0.1235 | 0.1174 |
| 0.0814 | 14.0 | 27260 | 0.1231 | 0.1238 |
| 0.063 | 15.0 | 34075 | 0.1171 | 0.1159 |
| 0.0793 | 16.0 | 40890 | 0.1158 | 0.1168 |
| 0.0686 | 17.0 | 47705 | 0.1187 | 0.1151 |
| 0.071 | 18.0 | 54520 | 0.1170 | 0.1182 |
| 0.0629 | 19.0 | 61335 | 0.1160 | 0.1085 |
| 0.0558 | 20.0 | 68150 | 0.1154 | 0.1093 |
| 0.0531 | 21.0 | 74965 | 0.1175 | 0.1044 |
| 0.0648 | 22.0 | 81780 | 0.1172 | 0.1056 |
| 0.0513 | 23.0 | 88595 | 0.1180 | 0.1048 |
| 0.0496 | 24.0 | 95410 | 0.1197 | 0.1025 |
| 0.0549 | 25.0 | 102225 | 0.1184 | 0.0991 |
| 0.0493 | 26.0 | 109040 | 0.1176 | 0.0977 |
| 0.0445 | 27.0 | 115855 | 0.1178 | 0.0989 |
| 0.0451 | 28.0 | 122670 | 0.1188 | 0.0992 |
| 0.045 | 29.0 | 129485 | 0.1182 | 0.0990 |
| 0.0452 | 30.0 | 136300 | 0.1190 | 0.0980 |

### Framework versions

- Transformers 4.19.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
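A usage sketch for an ASR checkpoint like this one; the audio path is a placeholder, and the pipeline resamples input to the 16 kHz rate wav2vec2 expects:

```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="oliverguhr/wav2vec2-large-xlsr-53-german",
)

# Any mono speech recording works; this path is a placeholder.
result = asr("sample_german_speech.wav")
print(result["text"])
```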
**ShoneRan/bert-emotion** · sha `0c11c1f65eeff0572218f6dcdb3e5985b24678f8` · lastModified 2022-06-02T05:15:37.000Z · pipeline_tag text-classification · private false · author ShoneRan · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,343
tags: [ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:tweet_eval", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]

README (the card content is identical to coreybrady/bert-emotion above):

---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- tweet_eval
metrics:
- precision
- recall
model-index:
- name: bert-emotion
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: tweet_eval
      type: tweet_eval
      args: emotion
    metrics:
    - name: Precision
      type: precision
      value: 0.7262254187805659
    - name: Recall
      type: recall
      value: 0.725549671319356
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-emotion

This model is a fine-tuned version of [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) on the tweet_eval dataset. It achieves the following results on the evaluation set:
- Loss: 1.1670
- Precision: 0.7262
- Recall: 0.7255
- Fscore: 0.7253

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | Fscore |
|---|---|---|---|---|---|---|
| 0.8561 | 1.0 | 815 | 0.7844 | 0.7575 | 0.6081 | 0.6253 |
| 0.5337 | 2.0 | 1630 | 0.9080 | 0.7567 | 0.7236 | 0.7325 |
| 0.2573 | 3.0 | 2445 | 1.1670 | 0.7262 | 0.7255 | 0.7253 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
**etch/distilbert-base-uncased-finetuned-sst-2-english-finetuned-sst2** · sha `b10369f234ed7caae10b163e6916abda2350ac4a` · lastModified 2022-06-02T19:36:13.000Z · pipeline_tag text-classification · private false · author etch · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,344
tags: [ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-sst-2-english-finetuned-sst2
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: glue
      type: glue
      args: sst2
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9059633027522935
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-sst-2-english-finetuned-sst2

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the glue dataset. It achieves the following results on the evaluation set:
- Loss: 0.3950
- Accuracy: 0.9060

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|---|---|---|---|---|
| 0.0818 | 1.0 | 4210 | 0.3950 | 0.9060 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
**Umer4/UrduAudio2Text** · sha `5ec0f811c9b78ba7dc95af5277c5e4c5300b5e00` · lastModified 2022-06-04T16:17:45.000Z · pipeline_tag automatic-speech-recognition · private false · author Umer4 · config null · downloads 5 · likes null · library_name transformers · `__index_level_0__` 17,345
tags: [ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]

README:

---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: UrduAudio2Text
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# UrduAudio2Text

This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set:
- Loss: 1.4978
- Wer: 0.8376

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|---|---|---|---|---|
| 5.5558 | 15.98 | 400 | 1.4978 | 0.8376 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu113
- Datasets 2.2.2
- Tokenizers 0.10.3
Edric111/distilbert-base-uncased-finetuned-ner
117a8fa42fdb2ebd457272b95b127488ace6972f
2022-06-05T16:32:56.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
Edric111
null
Edric111/distilbert-base-uncased-finetuned-ner
5
null
transformers
17,346
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: distilbert-base-uncased-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9273854328093868 - name: Recall type: recall value: 0.9372413021590782 - name: F1 type: f1 value: 0.9322873198686918 - name: Accuracy type: accuracy value: 0.9840341874910639 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ner This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0599 - Precision: 0.9274 - Recall: 0.9372 - F1: 0.9323 - Accuracy: 0.9840 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2378 | 1.0 | 878 | 0.0719 | 0.9107 | 0.9200 | 0.9154 | 0.9801 | | 0.0509 | 2.0 | 1756 | 0.0620 | 0.9156 | 0.9311 | 0.9233 | 0.9821 | | 0.0307 | 3.0 | 2634 | 0.0599 | 0.9274 | 0.9372 | 0.9323 | 0.9840 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
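The NER card above lists CoNLL-2003 metrics but no usage example. Below is a minimal sketch with the token-classification pipeline; the input sentence is an arbitrary illustration, and the exact label names come from the checkpoint config.

```python
from transformers import pipeline

# Repo id taken from this record's modelId field.
ner = pipeline(
    "token-classification",
    model="Edric111/distilbert-base-uncased-finetuned-ner",
    aggregation_strategy="simple",  # merge word pieces into entity spans
)

for entity in ner("Hugging Face is based in New York City."):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```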
enteramine/distilbert-base-uncased-finetuned-new-imdb
69e7405229025f7a5a5022f6f22fa85d13ce45c9
2022-06-03T17:38:46.000Z
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "dataset:imdb", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
enteramine
null
enteramine/distilbert-base-uncased-finetuned-new-imdb
5
null
transformers
17,347
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: distilbert-base-uncased-finetuned-new-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-new-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 2.4367 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.6828 | 1.0 | 157 | 2.5231 | | 2.5621 | 2.0 | 314 | 2.4732 | | 2.5255 | 3.0 | 471 | 2.4367 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
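The fill-mask card above omits usage. A minimal sketch follows; DistilBERT checkpoints use `[MASK]` as the mask token, and the example sentence is arbitrary rather than taken from the card.

```python
from transformers import pipeline

# Repo id taken from this record's modelId field.
fill = pipeline("fill-mask", model="enteramine/distilbert-base-uncased-finetuned-new-imdb")

# Top predictions for the masked position; the sentence is arbitrary.
for pred in fill("This movie was absolutely [MASK]."):
    print(pred["token_str"], round(pred["score"], 3))
```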
roshnir/mBert-finetuned-mlqa-dev-zh-hi
0e30d6dd2064178c9f5caadf8dfae27830506b79
2022-06-03T20:43:49.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/mBert-finetuned-mlqa-dev-zh-hi
5
null
transformers
17,348
Entry not found
Jeevesh8/lecun_feather_berts-69
7bba220362f89745c10c57712c95734b8243ed0c
2022-06-04T06:50:39.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-69
5
null
transformers
17,349
Entry not found
Jeevesh8/lecun_feather_berts-55
16329d3b324476a3a4789aed79d1346dad5f9d6b
2022-06-04T06:50:51.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-55
5
null
transformers
17,350
Entry not found
Jeevesh8/lecun_feather_berts-34
b774daaba9d58659732f19399cbb5736518cb8eb
2022-06-04T06:51:04.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-34
5
null
transformers
17,351
Entry not found
Jeevesh8/lecun_feather_berts-73
c8b90ccb7db876a179b6f118928d0afeb37aad28
2022-06-04T06:50:54.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-73
5
null
transformers
17,352
Entry not found
Jeevesh8/lecun_feather_berts-59
6d3783a8cb5d5e395c60b870eb12b0d0432665a5
2022-06-04T06:51:06.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-59
5
null
transformers
17,353
Entry not found
Jeevesh8/lecun_feather_berts-57
f0d405dba0fb87667d5fb6eac193fd38be26485b
2022-06-04T06:50:58.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-57
5
null
transformers
17,354
Entry not found
Jeevesh8/lecun_feather_berts-60
a7797823d13077ed8df483bdf259bf07610a8d2b
2022-06-04T06:50:57.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-60
5
null
transformers
17,355
Entry not found
Jeevesh8/lecun_feather_berts-25
e7721cba6e1645d5c15042383eb6a4aee64293d1
2022-06-04T06:51:47.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-25
5
null
transformers
17,356
Entry not found
Jeevesh8/lecun_feather_berts-23
75f3a18507a798ba4f01f12e51a8500859285c2f
2022-06-04T06:51:51.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-23
5
null
transformers
17,357
Entry not found
Jeevesh8/lecun_feather_berts-28
cae86909ec7e7a7cd6c2a28e0bde42b0c69483e0
2022-06-04T06:51:55.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-28
5
null
transformers
17,358
Entry not found
Jeevesh8/lecun_feather_berts-32
23fc3c17edf9bd0adc9da5037aac68aed2bc46d6
2022-06-04T06:53:24.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-32
5
null
transformers
17,359
Entry not found
Jeevesh8/lecun_feather_berts-6
cd5bd58b50efb589251da8643ed7061fc7e97706
2022-06-04T06:52:17.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-6
5
null
transformers
17,360
Entry not found
Jeevesh8/lecun_feather_berts-13
247c000b1fbc2d2126655bd2b7c73c07fe26be49
2022-06-04T06:51:39.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-13
5
null
transformers
17,361
Entry not found
Jeevesh8/lecun_feather_berts-17
598f081cfab0589085c3bd6a713818449fa957bf
2022-06-04T06:52:10.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-17
5
null
transformers
17,362
Entry not found
Jeevesh8/lecun_feather_berts-15
42c948b587dc556dbabe144be8c8e668eba88999
2022-06-04T06:52:22.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-15
5
null
transformers
17,363
Entry not found
Jeevesh8/lecun_feather_berts-16
e2a77409ad4d0be6d9e4974e8dfb613b833de1eb
2022-06-04T06:52:01.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-16
5
null
transformers
17,364
Entry not found
Jeevesh8/lecun_feather_berts-94
05d4cb4386f74afdde1f7d94cca485954e1f02f5
2022-06-04T06:51:02.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-94
5
null
transformers
17,365
Entry not found
Jeevesh8/lecun_feather_berts-74
e665870ee38b556af9a3471aeafbf5f2c9ca3eb1
2022-06-04T06:51:09.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-74
5
null
transformers
17,366
Entry not found
Jeevesh8/lecun_feather_berts-87
510c10695a63a4e5cbf00968e1152bd2bfebe6bc
2022-06-04T06:53:07.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/lecun_feather_berts-87
5
null
transformers
17,367
Entry not found
lbw/distilbert-base-uncased-finetuned-ner
623fa25c4a440e12399d0bcd4cca82e55af92e30
2022-06-04T07:44:07.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
lbw
null
lbw/distilbert-base-uncased-finetuned-ner
5
null
transformers
17,368
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: distilbert-base-uncased-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9279388974983396 - name: Recall type: recall value: 0.9378006488421524 - name: F1 type: f1 value: 0.9328437100094585 - name: Accuracy type: accuracy value: 0.9839706419686403 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ner This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0596 - Precision: 0.9279 - Recall: 0.9378 - F1: 0.9328 - Accuracy: 0.9840 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2377 | 1.0 | 878 | 0.0717 | 0.9140 | 0.9205 | 0.9172 | 0.9800 | | 0.0498 | 2.0 | 1756 | 0.0609 | 0.9168 | 0.9332 | 0.9249 | 0.9827 | | 0.0301 | 3.0 | 2634 | 0.0596 | 0.9279 | 0.9378 | 0.9328 | 0.9840 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
bubblecookie/samsum_trained_t5_model
d19be31791c1e61ad5b7c428a0a519b80d65fada
2022-06-04T13:32:18.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
bubblecookie
null
bubblecookie/samsum_trained_t5_model
5
null
transformers
17,369
Entry not found
yanekyuk/bert-cased-keyword-discriminator
fc2e64f87118dc006117d59bf6a877ca64cda0f3
2022-06-04T20:24:14.000Z
[ "pytorch", "bert", "token-classification", "en", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
yanekyuk
null
yanekyuk/bert-cased-keyword-discriminator
5
null
transformers
17,370
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - accuracy - f1 language: - en widget: - text: "Broadcom agreed to acquire cloud computing company VMware in a $61 billion (€57bn) cash-and stock deal, massively diversifying the chipmaker’s business and almost tripling its software-related revenue to about 45% of its total sales. By the numbers: VMware shareholders will receive either $142.50 in cash or 0.2520 of a Broadcom share for each VMware stock. Broadcom will also assume $8 billion of VMware's net debt." - text: "Canadian Natural Resources Minister Jonathan Wilkinson told Bloomberg that the country could start supplying Europe with liquefied natural gas (LNG) in as soon as three years by converting an existing LNG import facility on Canada’s Atlantic coast into an export terminal. Bottom line: Wilkinson said what Canada cares about is that the new LNG facility uses a low-emission process for the gas and is capable of transitioning to exporting hydrogen later on." - text: "Google is being investigated by the UK’s antitrust watchdog for its dominance in the \"ad tech stack,\" the set of services that facilitate the sale of online advertising space between advertisers and sellers. Google has strong positions at various levels of the ad tech stack and charges fees to both publishers and advertisers. A step back: UK Competition and Markets Authority has also been investigating whether Google and Meta colluded over ads, probing into the advertising agreement between the two companies, codenamed Jedi Blue." - text: "Shares in Twitter closed 6.35% up after an SEC 13D filing revealed that Elon Musk pledged to put up an additional $6.25 billion of his own wealth to fund the $44 billion takeover deal, lifting the total to $33.5 billion from an initial $27.25 billion. In other news: Former Twitter CEO Jack Dorsey announced he's stepping down, but would stay on Twitter’s board \\“until his term expires at the 2022 meeting of stockholders.\"" model-index: - name: bert-keyword-discriminator results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-keyword-discriminator This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1310 - Precision: 0.8522 - Recall: 0.8868 - Accuracy: 0.9732 - F1: 0.8692 - Ent/precision: 0.8874 - Ent/accuracy: 0.9246 - Ent/f1: 0.9056 - Con/precision: 0.8011 - Con/accuracy: 0.8320 - Con/f1: 0.8163 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | Accuracy | F1 | Ent/precision | Ent/accuracy | Ent/f1 | Con/precision | Con/accuracy | Con/f1 | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:--------:|:------:|:-------------:|:------------:|:------:|:-------------:|:------------:|:------:| | 0.1744 | 1.0 | 1875 | 0.1261 | 0.7176 | 0.7710 | 0.9494 | 0.7433 | 0.7586 | 0.8503 | 0.8018 | 0.6514 | 0.6561 | 0.6537 | | 0.1261 | 2.0 | 3750 | 0.1041 | 0.7742 | 0.8057 | 0.9600 | 0.7896 | 0.8083 | 0.8816 | 0.8433 | 0.7185 | 0.6957 | 0.7070 | | 0.0878 | 3.0 | 5625 | 0.0979 | 0.8176 | 0.8140 | 0.9655 | 0.8158 | 0.8518 | 0.8789 | 0.8651 | 0.7634 | 0.7199 | 0.7410 | | 0.0625 | 4.0 | 7500 | 0.0976 | 0.8228 | 0.8643 | 0.9696 | 0.8430 | 0.8515 | 0.9182 | 0.8836 | 0.7784 | 0.7862 | 0.7823 | | 0.0456 | 5.0 | 9375 | 0.1047 | 0.8304 | 0.8758 | 0.9704 | 0.8525 | 0.8758 | 0.9189 | 0.8968 | 0.7655 | 0.8133 | 0.7887 | | 0.0342 | 6.0 | 11250 | 0.1207 | 0.8363 | 0.8887 | 0.9719 | 0.8617 | 0.8719 | 0.9274 | 0.8988 | 0.7846 | 0.8327 | 0.8080 | | 0.0256 | 7.0 | 13125 | 0.1241 | 0.848 | 0.8892 | 0.9731 | 0.8681 | 0.8791 | 0.9299 | 0.9038 | 0.8019 | 0.8302 | 0.8158 | | 0.0205 | 8.0 | 15000 | 0.1310 | 0.8522 | 0.8868 | 0.9732 | 0.8692 | 0.8874 | 0.9246 | 0.9056 | 0.8011 | 0.8320 | 0.8163 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
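The keyword-discriminator card above defines widget examples but no code. A minimal sketch with the token-classification pipeline follows, reusing a shortened version of the first widget text; the exact keyword label names (the metrics table suggests ENT/CON-style classes) come from the checkpoint config and are not guaranteed here.

```python
from transformers import pipeline

# Repo id taken from this record's modelId field.
extractor = pipeline(
    "token-classification",
    model="yanekyuk/bert-cased-keyword-discriminator",
    aggregation_strategy="simple",  # merge word pieces into keyword spans
)

text = ("Broadcom agreed to acquire cloud computing company VMware "
        "in a $61 billion cash-and-stock deal.")

for span in extractor(text):
    print(span["entity_group"], span["word"])
```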
helliun/distilbert-gaydar
5fa91e4d5895c01639338a85ac72ab6d38f57f34
2022-06-04T21:44:20.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
helliun
null
helliun/distilbert-gaydar
5
null
transformers
17,371
Entry not found
nestoralvaro/mT5_multilingual_XLSum-finetuned-xsum-xsum
a3f6bb5f3c8c5241a3096b0670726ab89668c4c0
2022-06-05T19:30:12.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
nestoralvaro
null
nestoralvaro/mT5_multilingual_XLSum-finetuned-xsum-xsum
5
null
transformers
17,372
--- tags: - generated_from_trainer datasets: - xsum metrics: - rouge model-index: - name: mT5_multilingual_XLSum-finetuned-xsum-xsum results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: xsum type: xsum args: default metrics: - name: Rouge1 type: rouge value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mT5_multilingual_XLSum-finetuned-xsum-xsum This model is a fine-tuned version of [csebuetnlp/mT5_multilingual_XLSum](https://huggingface.co/csebuetnlp/mT5_multilingual_XLSum) on the xsum dataset. It achieves the following results on the evaluation set: - Loss: nan - Rouge1: 0.0 - Rouge2: 0.0 - Rougel: 0.0 - Rougelsum: 0.0 - Gen Len: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:------:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 0.0 | 1.0 | 102023 | nan | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
bondi/bert-semaphore-prediction-w8
2a8204861ddb1578b69ea2ed67a16e2cbcb460b3
2022-06-06T02:36:31.000Z
[ "pytorch", "bert", "text-classification", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
bondi
null
bondi/bert-semaphore-prediction-w8
5
null
transformers
17,373
--- tags: - generated_from_trainer model-index: - name: bert-semaphore-prediction-w8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-semaphore-prediction-w8 This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 6 - eval_batch_size: 6 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0 - Datasets 2.2.2 - Tokenizers 0.12.1
seonghee/bert-base-uncased-emotion
e5ab47314fd88d61c6c82f491f80d851c26ae0c6
2022-06-06T05:33:54.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
seonghee
null
seonghee/bert-base-uncased-emotion
5
null
transformers
17,374
Entry not found
bekirbakar/wav2vec2-large-xls-r-300m-finnish
2709d9fa3126059a424c43aa7ea511d8cbfbaf51
2022-06-16T13:34:45.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
bekirbakar
null
bekirbakar/wav2vec2-large-xls-r-300m-finnish
5
null
transformers
17,375
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-finnish results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-finnish This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.4747 - Wer: 0.5143 ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.1666 | 14.8 | 400 | 0.4747 | 0.5143 | | 0.0875 | 29.62 | 800 | 0.4747 | 0.5143 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
logo-data-science/mt5-logo-qg-qa-turkish
f6c8539ccf517caddf95152f7af6beb09a933673
2022-06-06T14:55:17.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "license:gpl", "autotrain_compatible" ]
text2text-generation
false
logo-data-science
null
logo-data-science/mt5-logo-qg-qa-turkish
5
null
transformers
17,376
--- license: gpl ---
bondi/bert-clean-semaphore-prediction-w0
775537aea2b848a93caa88601e72ff48b41e968a
2022-06-07T05:54:44.000Z
[ "pytorch", "bert", "text-classification", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
bondi
null
bondi/bert-clean-semaphore-prediction-w0
5
null
transformers
17,377
--- tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: bert-clean-semaphore-prediction-w0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-clean-semaphore-prediction-w0 This model is a fine-tuned version of [dccuchile/bert-base-spanish-wwm-cased](https://huggingface.co/dccuchile/bert-base-spanish-wwm-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0680 - Accuracy: 0.9693 - F1: 0.9694 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0 - Datasets 2.2.2 - Tokenizers 0.12.1
DanielSM/1444Test
ebc5f32fee2a59cb2b0703f4e101f4ab69743cc6
2022-06-07T06:24:02.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
DanielSM
null
DanielSM/1444Test
5
null
transformers
17,378
Entry not found
bondi/bert-clean-semaphore-prediction-w4
1dca3c92c9c418f2da8ddfe188fd40829bc1047e
2022-06-07T07:55:16.000Z
[ "pytorch", "bert", "text-classification", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
bondi
null
bondi/bert-clean-semaphore-prediction-w4
5
null
transformers
17,379
--- tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: bert-clean-semaphore-prediction-w4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-clean-semaphore-prediction-w4 This model is a fine-tuned version of [dccuchile/bert-base-spanish-wwm-cased](https://huggingface.co/dccuchile/bert-base-spanish-wwm-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0747 - Accuracy: 0.9652 - F1: 0.9651 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0 - Datasets 2.2.2 - Tokenizers 0.12.1
bondi/bert-clean-semaphore-prediction-w8
6b22353f2bc3644d9976ac0b76c1958107503d6d
2022-06-07T08:55:38.000Z
[ "pytorch", "bert", "text-classification", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
bondi
null
bondi/bert-clean-semaphore-prediction-w8
5
null
transformers
17,380
--- tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: bert-clean-semaphore-prediction-w8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-clean-semaphore-prediction-w8 This model is a fine-tuned version of [dccuchile/bert-base-spanish-wwm-cased](https://huggingface.co/dccuchile/bert-base-spanish-wwm-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0669 - Accuracy: 0.9671 - F1: 0.9672 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0 - Datasets 2.2.2 - Tokenizers 0.12.1
Matthijs/ane-distilbert-test
e4c33a637122614bcd26939f1236fe87f853faa2
2022-06-07T14:14:12.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
Matthijs
null
Matthijs/ane-distilbert-test
5
null
transformers
17,381
Entry not found
mmillet/distilrubert-tiny-cased-conversational-v1_finetuned_emotion_experiment_augmented_anger_fear
3af6c59503cdf5538a4845029329e9e9e9cdd1b7
2022-06-08T16:10:06.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
mmillet
null
mmillet/distilrubert-tiny-cased-conversational-v1_finetuned_emotion_experiment_augmented_anger_fear
5
null
transformers
17,382
--- tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: distilrubert-tiny-cased-conversational-v1_finetuned_emotion_experiment_augmented_anger_fear results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilrubert-tiny-cased-conversational-v1_finetuned_emotion_experiment_augmented_anger_fear This model is a fine-tuned version of [DeepPavlov/distilrubert-tiny-cased-conversational-v1](https://huggingface.co/DeepPavlov/distilrubert-tiny-cased-conversational-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3760 - Accuracy: 0.8758 - F1: 0.8750 - Precision: 0.8753 - Recall: 0.8758 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=0.0001 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:| | 1.2636 | 1.0 | 69 | 1.0914 | 0.6013 | 0.5599 | 0.5780 | 0.6013 | | 1.029 | 2.0 | 138 | 0.9180 | 0.6514 | 0.6344 | 0.6356 | 0.6514 | | 0.904 | 3.0 | 207 | 0.8235 | 0.6827 | 0.6588 | 0.6904 | 0.6827 | | 0.8084 | 4.0 | 276 | 0.7272 | 0.7537 | 0.7477 | 0.7564 | 0.7537 | | 0.7242 | 5.0 | 345 | 0.6435 | 0.7860 | 0.7841 | 0.7861 | 0.7860 | | 0.6305 | 6.0 | 414 | 0.5543 | 0.8173 | 0.8156 | 0.8200 | 0.8173 | | 0.562 | 7.0 | 483 | 0.4860 | 0.8392 | 0.8383 | 0.8411 | 0.8392 | | 0.5042 | 8.0 | 552 | 0.4474 | 0.8528 | 0.8514 | 0.8546 | 0.8528 | | 0.4535 | 9.0 | 621 | 0.4213 | 0.8580 | 0.8579 | 0.8590 | 0.8580 | | 0.4338 | 10.0 | 690 | 0.4106 | 0.8591 | 0.8578 | 0.8605 | 0.8591 | | 0.4026 | 11.0 | 759 | 0.4064 | 0.8622 | 0.8615 | 0.8632 | 0.8622 | | 0.3861 | 12.0 | 828 | 0.3874 | 0.8737 | 0.8728 | 0.8733 | 0.8737 | | 0.3709 | 13.0 | 897 | 0.3841 | 0.8706 | 0.8696 | 0.8701 | 0.8706 | | 0.3592 | 14.0 | 966 | 0.3841 | 0.8716 | 0.8709 | 0.8714 | 0.8716 | | 0.3475 | 15.0 | 1035 | 0.3834 | 0.8737 | 0.8728 | 0.8732 | 0.8737 | | 0.3537 | 16.0 | 1104 | 0.3805 | 0.8727 | 0.8717 | 0.8722 | 0.8727 | | 0.3317 | 17.0 | 1173 | 0.3775 | 0.8747 | 0.8739 | 0.8741 | 0.8747 | | 0.323 | 18.0 | 1242 | 0.3759 | 0.8727 | 0.8718 | 0.8721 | 0.8727 | | 0.3327 | 19.0 | 1311 | 0.3776 | 0.8758 | 0.8750 | 0.8756 | 0.8758 | | 0.3339 | 20.0 | 1380 | 0.3760 | 0.8758 | 0.8750 | 0.8753 | 0.8758 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
huggingtweets/makimasdoggy
3fddcd9fa61916b8ca12206908de94800f45665c
2022-06-08T19:17:06.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/makimasdoggy
5
null
transformers
17,383
--- language: en thumbnail: http://www.huggingtweets.com/makimasdoggy/1654715821978/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1534537330014445569/ql3I-npY_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Vanser</div> <div style="text-align: center; font-size: 14px;">@makimasdoggy</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Vanser. | Data | Vanser | | --- | --- | | Tweets downloaded | 3249 | | Retweets | 1548 | | Short tweets | 346 | | Tweets kept | 1355 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/66wk3fyw/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @makimasdoggy's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2di8hgps) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2di8hgps/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/makimasdoggy') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
huggingtweets/verizon
d1a63d4f7bc835e46ebb9303b380230d4aef3d21
2022-06-09T00:33:36.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/verizon
5
null
transformers
17,384
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1496892874276880389/ndAolYWm_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Verizon</div> <div style="text-align: center; font-size: 14px;">@verizon</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Verizon. | Data | Verizon | | --- | --- | | Tweets downloaded | 3246 | | Retweets | 408 | | Short tweets | 188 | | Tweets kept | 2650 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2rssnlth/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @verizon's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/17qcsqw6) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/17qcsqw6/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/verizon') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Skil-Internal/bart-paraphrase-finetuned-xsum-v4
4702e43bb02ebe990ce525bf5a8028508a436d21
2022-06-09T08:52:10.000Z
[ "pytorch", "tensorboard", "bart", "text2text-generation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
Skil-Internal
null
Skil-Internal/bart-paraphrase-finetuned-xsum-v4
5
null
transformers
17,385
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: bart-paraphrase-finetuned-xsum-v4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-paraphrase-finetuned-xsum-v4 This model is a fine-tuned version of [eugenesiow/bart-paraphrase](https://huggingface.co/eugenesiow/bart-paraphrase) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.1765 - Rouge1: 49.972 - Rouge2: 49.85 - Rougel: 49.9165 - Rougelsum: 49.7819 - Gen Len: 8.3061 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | No log | 1.0 | 263 | 0.5050 | 47.9628 | 47.7085 | 47.8625 | 47.772 | 6.9639 | | 0.676 | 2.0 | 526 | 0.5793 | 49.6085 | 49.3495 | 49.5196 | 49.4173 | 7.4715 | | 0.676 | 3.0 | 789 | 0.7011 | 49.8635 | 49.6937 | 49.8155 | 49.6604 | 7.576 | | 0.322 | 4.0 | 1052 | 0.7585 | 49.8851 | 49.7578 | 49.8526 | 49.6977 | 7.6654 | | 0.322 | 5.0 | 1315 | 0.6615 | 49.861 | 49.7185 | 49.7978 | 49.6669 | 8.3023 | | 0.2828 | 6.0 | 1578 | 0.6233 | 49.916 | 49.7819 | 49.8861 | 49.7384 | 7.6084 | | 0.2828 | 7.0 | 1841 | 0.9380 | 49.916 | 49.7819 | 49.8861 | 49.7384 | 8.2433 | | 0.2073 | 8.0 | 2104 | 0.8497 | 49.9624 | 49.8355 | 49.91 | 49.7666 | 7.6331 | | 0.2073 | 9.0 | 2367 | 0.7715 | 49.972 | 49.85 | 49.9165 | 49.7819 | 7.9772 | | 0.1744 | 10.0 | 2630 | 1.1765 | 49.972 | 49.85 | 49.9165 | 49.7819 | 8.3061 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
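The paraphrase card above reports Rouge scores but no usage. A minimal sketch with the text2text-generation pipeline follows; the input sentence and generation length are illustrative, not from the card.

```python
from transformers import pipeline

# Repo id taken from this record's modelId field.
paraphraser = pipeline(
    "text2text-generation",
    model="Skil-Internal/bart-paraphrase-finetuned-xsum-v4",
)

# Illustrative input; the base model is a BART paraphraser, so a plain
# sentence in, paraphrased sentence out is assumed.
print(paraphraser("How can I become a better programmer?", max_length=32))
```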
ghadeermobasher/WLT-PubMedBERT-NCBI
4fcf51cc796410aff255f7557c8224a76a8746ed
2022-06-09T10:28:35.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/WLT-PubMedBERT-NCBI
5
null
transformers
17,386
Entry not found
radiogroup-crits/voxpopuli_base_it_2_5_gram_doc4lm
47526bc9ae9e990587cb12de8746ba2643713be1
2022-06-15T09:31:23.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "it", "dataset:voxpopuli-v2", "transformers", "audio", "hf-asr-leaderboard", "voxpopuli-v2", "speech", "license:apache-2.0" ]
automatic-speech-recognition
false
radiogroup-crits
null
radiogroup-crits/voxpopuli_base_it_2_5_gram_doc4lm
5
null
transformers
17,387
--- language: - it license: apache-2.0 datasets: - voxpopuli-v2 metrics: - wer - cer tags: - audio - automatic-speech-recognition - hf-asr-leaderboard - it - voxpopuli-v2 - speech - wav2vec2 --- # VOXPOPULI_BASE_IT_2_5_GRAM_DOC4LM ## Language model information Our language model was generated using a dataset of Italian Wikipedia articles and manual transcriptions of radio newspapers and television programs. ## Citation If you want to cite this model, you can use: ```bibtex @misc{crits2022voxpopuli_base_it_2_5_gram_doc4lm, title={Wav2Vec2 with LM Italian by radiogroup crits}, author={Teraoni Prioletti Raffaele, Casagranda Paolo and Russo Francesco}, publisher={Hugging Face}, journal={Hugging Face Hub}, howpublished={\url{https://huggingface.co/radiogroup-crits/voxpopuli_base_it_2_5_gram_doc4lm}}, year={2022} } ```
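The card above describes a CTC acoustic model paired with a 5-gram language model but shows no decoding code. A minimal sketch using `Wav2Vec2ProcessorWithLM` follows; it assumes the repo ships a pyctcdecode-compatible language model (the pyctcdecode and kenlm packages must be installed) and that `speech` is a 16 kHz mono waveform.

```python
import torch
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

# Repo id taken from this record's modelId field; Wav2Vec2ProcessorWithLM
# needs the pyctcdecode and kenlm packages at import time.
model_id = "radiogroup-crits/voxpopuli_base_it_2_5_gram_doc4lm"
processor = Wav2Vec2ProcessorWithLM.from_pretrained(model_id)
model = AutoModelForCTC.from_pretrained(model_id)

def transcribe(speech):
    # `speech` is assumed to be a 16 kHz mono waveform (1-D float array).
    inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
    with torch.no_grad():
        logits = model(inputs.input_values).logits
    # batch_decode runs CTC beam search rescored by the 5-gram language model
    return processor.batch_decode(logits.numpy()).text[0]
```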
ghadeermobasher/WLT-BlueBERT-BC4CHEMD
d8deaf41766b9e670468d3d7a9f851c8fcfb4e9d
2022-06-09T16:47:23.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/WLT-BlueBERT-BC4CHEMD
5
null
transformers
17,388
Entry not found
ghadeermobasher/WLT-SciBERT-BC4CHEMD-O
b58115af8bce48d6edd3963daab31976bcf9b8b9
2022-06-09T13:45:04.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/WLT-SciBERT-BC4CHEMD-O
5
null
transformers
17,389
Entry not found
Peltarion/dnabert-minilm-small
88883e0c412bcccefd2846c4cda67e5eaa06b578
2022-07-02T11:29:00.000Z
[ "pytorch", "bert", "transformers", "DNA", "license:mit" ]
null
false
Peltarion
null
Peltarion/dnabert-minilm-small
5
null
transformers
17,390
--- tags: - DNA license: mit --- ## MiniDNA small model This is a distilled version of [DNABERT](https://github.com/jerryji1993/DNABERT) obtained with the MiniLM technique. It has a BERT architecture with 6 layers and 384 hidden units, pre-trained on 6-mer DNA sequences. For more details on the pre-training scheme and methods, please check the original [thesis report](http://www.diva-portal.org/smash/record.jsf?dswid=846&pid=diva2%3A1676068&c=1&searchType=SIMPLE&language=en&query=joana+palés&af=%5B%5D&aq=%5B%5B%5D%5D&aq2=%5B%5B%5D%5D&aqe=%5B%5D&noOfRows=50&sortOrder=author_sort_asc&sortOrder2=title_sort_asc&onlyFullText=false&sf=all). ## How to Use The model can be fine-tuned on a downstream genomic task, e.g. promoter identification. ```python import torch from transformers import BertForSequenceClassification model = BertForSequenceClassification.from_pretrained('Peltarion/dnabert-minilm-small') ``` More details on how to fine-tune the model, the dataset, and additional source code are available on [github.com/joanaapa/Distillation-DNABERT-Promoter](https://github.com/joanaapa/Distillation-DNABERT-Promoter).
roshnir/xlmr-finetuned-mlqa-dev-en-zh-hi
1e3705e14ed2d6bf8e7bdaa74fd0435b882899fb
2022-06-09T19:02:16.000Z
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/xlmr-finetuned-mlqa-dev-en-zh-hi
5
null
transformers
17,391
Entry not found
mfreihaut/finetuned-audio-transcriber
3df044cd2b0bd466d35d571c38ca24a6bd95499d
2022-06-10T05:24:30.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
mfreihaut
null
mfreihaut/finetuned-audio-transcriber
5
null
transformers
17,392
Entry not found
titi7242229/roberta-base-bne-finetuned_personality_multi
0ab729cc14001f2bd3fbd26d0b9d0b93c534cf4f
2022-06-10T14:19:54.000Z
[ "pytorch", "tensorboard", "roberta", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
titi7242229
null
titi7242229/roberta-base-bne-finetuned_personality_multi
5
null
transformers
17,393
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: roberta-base-bne-finetuned_personality_multi results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-bne-finetuned_personality_multi This model is a fine-tuned version of [BSC-TeMU/roberta-base-bne](https://huggingface.co/BSC-TeMU/roberta-base-bne) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.3709 - Accuracy: 0.5130 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.2576 | 1.0 | 125 | 2.2755 | 0.2340 | | 2.0409 | 2.0 | 250 | 2.1425 | 0.2974 | | 1.6358 | 3.0 | 375 | 1.8730 | 0.4403 | | 1.3553 | 4.0 | 500 | 1.7443 | 0.5032 | | 0.9201 | 5.0 | 625 | 1.7165 | 0.5055 | | 0.5199 | 6.0 | 750 | 1.7476 | 0.5107 | | 0.5588 | 7.0 | 875 | 1.7758 | 0.5153 | | 0.2079 | 8.0 | 1000 | 1.7964 | 0.5251 | | 0.2685 | 9.0 | 1125 | 1.8886 | 0.5187 | | 0.1261 | 10.0 | 1250 | 1.9463 | 0.5199 | | 0.1105 | 11.0 | 1375 | 2.0337 | 0.5222 | | 0.1572 | 12.0 | 1500 | 2.1206 | 0.5084 | | 0.0643 | 13.0 | 1625 | 2.1815 | 0.5182 | | 0.0174 | 14.0 | 1750 | 2.2412 | 0.5176 | | 0.0266 | 15.0 | 1875 | 2.2741 | 0.5112 | | 0.0447 | 16.0 | 2000 | 2.3089 | 0.5159 | | 0.02 | 17.0 | 2125 | 2.3401 | 0.5135 | | 0.0414 | 18.0 | 2250 | 2.3504 | 0.5159 | | 0.0122 | 19.0 | 2375 | 2.3661 | 0.5130 | | 0.0154 | 20.0 | 2500 | 2.3709 | 0.5130 | ### Framework versions - Transformers 4.19.3 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
edumunozsala/vit_base-224-in21k-ft-cifar10
115eb6c2394aaf071ae8a52a22366e137cb5fb29
2022-07-21T10:54:09.000Z
[ "pytorch", "vit", "image-classification", "es", "dataset:cifar10", "arxiv:2006.03677", "transformers", "sagemaker", "ImageClassification", "generated_from_trainer", "license:apache-2.0", "model-index" ]
image-classification
false
edumunozsala
null
edumunozsala/vit_base-224-in21k-ft-cifar10
5
null
transformers
17,394
--- language: es tags: - sagemaker - vit - ImageClassification - generated_from_trainer license: apache-2.0 datasets: - cifar10 metrics: - accuracy model-index: - name: vit_base-224-in21k-ft-cifar10 results: - task: name: Image Classification type: image-classification dataset: name: "Cifar10" type: cifar10 metrics: - name: Accuracy type: accuracy value: 0.97 --- # Model vit_base-224-in21k-ft-cifar10 ## **A fine-tuned model for image classification** This model was trained using Amazon SageMaker and the Hugging Face Deep Learning container. The base model is **Vision Transformer (base-sized model)**, a transformer encoder model (BERT-like) pretrained on a large collection of images in a supervised fashion, namely ImageNet-21k, at a resolution of 224x224 pixels. [Link to base model](https://huggingface.co/google/vit-base-patch16-224-in21k) ## Base model citation ### BibTeX entry and citation info ```bibtex @misc{wu2020visual, title={Visual Transformers: Token-based Image Representation and Processing for Computer Vision}, author={Bichen Wu and Chenfeng Xu and Xiaoliang Dai and Alvin Wan and Peizhao Zhang and Zhicheng Yan and Masayoshi Tomizuka and Joseph Gonzalez and Kurt Keutzer and Peter Vajda}, year={2020}, eprint={2006.03677}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` ## Dataset [Link to dataset description](http://www.cs.toronto.edu/~kriz/cifar.html) The CIFAR-10 and CIFAR-100 datasets are labeled subsets of the 80 million tiny images dataset. They were collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class. Sizes of datasets: - Train dataset: 50,000 - Test dataset: 10,000 ## Intended uses & limitations This model is intended for image classification. ## Hyperparameters { "epochs": "5", "train_batch_size": "32", "eval_batch_size": "8", "fp16": "true", "learning_rate": "1e-05", } ## Test results - Accuracy = 0.97 ## Model in action ### Usage for Image Classification ```python from transformers import ViTFeatureExtractor, ViTForImageClassification from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k') model = ViTForImageClassification.from_pretrained('edumunozsala/vit_base-224-in21k-ft-cifar10') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) predicted_class = outputs.logits.argmax(-1).item() ``` Created by [Eduardo Muñoz/@edumunozsala](https://github.com/edumunozsala)
titi7242229/roberta-base-bne-finetuned_personality_multi_4
4b97cf1711b016e37aef57d5ab9938275a4e20a2
2022-06-11T19:13:27.000Z
[ "pytorch", "tensorboard", "roberta", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
titi7242229
null
titi7242229/roberta-base-bne-finetuned_personality_multi_4
5
null
transformers
17,395
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: roberta-base-bne-finetuned_personality_multi_4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-bne-finetuned_personality_multi_4 This model is a fine-tuned version of [BSC-TeMU/roberta-base-bne](https://huggingface.co/BSC-TeMU/roberta-base-bne) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.1709 - Accuracy: 0.3470 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.1759 | 1.0 | 125 | 2.1873 | 0.2548 | | 1.8651 | 2.0 | 250 | 2.2285 | 0.2680 | | 1.8619 | 3.0 | 375 | 2.1732 | 0.2951 | | 1.7224 | 4.0 | 500 | 2.0688 | 0.3925 | | 1.6432 | 5.0 | 625 | 2.1094 | 0.3735 | | 1.3599 | 6.0 | 750 | 2.1732 | 0.3631 | | 1.0623 | 7.0 | 875 | 2.4785 | 0.3579 | | 1.0504 | 8.0 | 1000 | 2.4598 | 0.3844 | | 0.7662 | 9.0 | 1125 | 2.8081 | 0.3573 | | 0.9167 | 10.0 | 1250 | 2.9385 | 0.3452 | | 0.6391 | 11.0 | 1375 | 2.9933 | 0.3320 | | 0.3893 | 12.0 | 1500 | 3.1037 | 0.3579 | | 0.673 | 13.0 | 1625 | 3.4369 | 0.3631 | | 0.3498 | 14.0 | 1750 | 3.6396 | 0.3383 | | 0.3891 | 15.0 | 1875 | 3.8332 | 0.3556 | | 0.0818 | 16.0 | 2000 | 3.9451 | 0.3401 | | 0.1438 | 17.0 | 2125 | 3.9271 | 0.3458 | | 0.0634 | 18.0 | 2250 | 4.1564 | 0.3481 | | 0.0121 | 19.0 | 2375 | 4.1405 | 0.3499 | | 0.0071 | 20.0 | 2500 | 4.1709 | 0.3470 | ### Framework versions - Transformers 4.19.4 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
Lindeberg/distilbert-base-uncased-finetuned-cola
c1bfd0d5b5ad2589e8dfa06731d1b7b47c1972cd
2022-06-11T21:10:06.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
Lindeberg
null
Lindeberg/distilbert-base-uncased-finetuned-cola
5
null
transformers
17,396
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.4496664370323995 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.4949 - Matthews Correlation: 0.4497 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5231 | 1.0 | 535 | 0.4949 | 0.4497 | ### Framework versions - Transformers 4.19.4 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
MyMild/finetune_iapp_thaiqa
46b6f7f0cbfbc9f52dd7fc3a9839fe4ea39ece55
2022-06-12T07:52:39.000Z
[ "pytorch", "tensorboard", "camembert", "question-answering", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
question-answering
false
MyMild
null
MyMild/finetune_iapp_thaiqa
5
null
transformers
17,397
--- tags: - generated_from_trainer model-index: - name: finetune_iapp_thaiqa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetune_iapp_thaiqa This model is a fine-tuned version of [airesearch/wangchanberta-base-att-spm-uncased](https://huggingface.co/airesearch/wangchanberta-base-att-spm-uncased) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.15.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.10.3
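The Thai QA card above gives hyperparameters only. A minimal sketch with the question-answering pipeline follows; the Thai question/context pair is a hypothetical illustration, not taken from the iapp/thaiqa data.

```python
from transformers import pipeline

# Repo id taken from this record's modelId field.
qa = pipeline("question-answering", model="MyMild/finetune_iapp_thaiqa")

# Hypothetical Thai example: "Bangkok is the capital of which country?"
result = qa(
    question="กรุงเทพมหานครเป็นเมืองหลวงของประเทศใด",
    context="กรุงเทพมหานครเป็นเมืองหลวงของประเทศไทย",
)
print(result["answer"], result["score"])
```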
Jeevesh8/std_pnt_04_feather_berts-14
604e1fe60a2355cbd8aeda9738e39ea23b0777bd
2022-06-12T06:03:23.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_pnt_04_feather_berts-14
5
null
transformers
17,398
Entry not found
Jeevesh8/std_pnt_04_feather_berts-10
ebdea55a64e3a92bc88663ea7b72e8800eab8e49
2022-06-12T06:04:23.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_pnt_04_feather_berts-10
5
null
transformers
17,399
Entry not found