The dataset schema (one row per column; string columns report min–max value lengths, class columns report the number of distinct values):

| Column | Type | Values |
|:---|:---|:---|
| modelId | string | lengths 4–112 |
| sha | string | lengths 40–40 |
| lastModified | string | lengths 24–24 |
| tags | list | |
| pipeline_tag | string | 29 classes |
| private | bool | 1 class |
| author | string | lengths 2–38 |
| config | null | |
| id | string | lengths 4–112 |
| downloads | float64 | 0–36.8M |
| likes | float64 | 0–712 |
| library_name | string | 17 classes |
| __index_level_0__ | int64 | 0–38.5k |
| readme | string | lengths 0–186k |

Each record below lists its field values in this column order, one value per line.
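For orientation, a hedged sketch of how a dump with this schema could be inspected with the `datasets` library; the dataset id below is a placeholder, since this extract does not name its source dataset:

```
from datasets import load_dataset

# Placeholder id: the extract does not name the dataset it was taken from.
ds = load_dataset("some-namespace/hf-model-metadata", split="train")

# Each record carries the fields listed in the schema above.
row = ds[0]
print(row["modelId"], row["pipeline_tag"], row["library_name"])
print(int(row["downloads"]), "downloads,", int(row["likes"]), "likes")
print(row["readme"][:200])
```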
echarlaix/distilbert-sst2-inc-dynamic-quantization-magnitude-pruning-0.1
d568bc2520a81b6017a152db2fa0ef80b611dcd3
2022-06-06T11:25:48.000Z
[ "pytorch", "distilbert", "text-classification", "transformers", "license:apache-2.0" ]
text-classification
false
echarlaix
null
echarlaix/distilbert-sst2-inc-dynamic-quantization-magnitude-pruning-0.1
10
null
transformers
11,900
---
license: apache-2.0
---
nboudad/Maghriberta0.0
75ce27119a6b44ab753eb448b22938562e90c2f6
2022-06-07T12:05:50.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
nboudad
null
nboudad/Maghriberta0.0
10
null
transformers
11,901
---
widget:
- text: "جاب ليا <mask> ."
  example_title: "example1"
- text: "مشيت نجيب <mask> فالفرماسيان ."
  example_title: "example2"
---
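The widget examples above can be reproduced outside the widget; a minimal fill-mask sketch, assuming the standard transformers pipeline API (the checkpoint name comes from the record above):

```
from transformers import pipeline

# Minimal sketch: RoBERTa-style checkpoints use the <mask> token, as in the widget examples.
fill_mask = pipeline("fill-mask", model="nboudad/Maghriberta0.0")

for prediction in fill_mask("جاب ليا <mask> ."):
    print(prediction["token_str"], round(prediction["score"], 3))
```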
ahmeddbahaa/mT5_multilingual_XLSum-finetuned-en-cnn
c2316e37b3a236cb95559cf40aa7c9673fd2428a
2022-06-09T06:28:23.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "dataset:cnn_dailymail", "transformers", "summarization", "en", "Abstractive Summarization", "generated_from_trainer", "model-index", "autotrain_compatible" ]
summarization
false
ahmeddbahaa
null
ahmeddbahaa/mT5_multilingual_XLSum-finetuned-en-cnn
10
null
transformers
11,902
---
tags:
- summarization
- en
- mt5
- Abstractive Summarization
- generated_from_trainer
datasets:
- cnn_dailymail
model-index:
- name: mT5_multilingual_XLSum-finetuned-en-cnn
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mT5_multilingual_XLSum-finetuned-en-cnn

This model is a fine-tuned version of [csebuetnlp/mT5_multilingual_XLSum](https://huggingface.co/csebuetnlp/mT5_multilingual_XLSum) on the cnn_dailymail dataset.
It achieves the following results on the evaluation set:
- Loss: 3.0025
- Rouge-1: 36.87
- Rouge-2: 15.31
- Rouge-l: 33.74
- Gen Len: 77.93
- Bertscore: 88.28

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0005
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 250
- num_epochs: 4
- label_smoothing_factor: 0.1

### Training results

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
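The card reports ROUGE on cnn_dailymail but leaves usage open ("More information needed"); a minimal inference sketch, assuming the standard transformers summarization pipeline (generation lengths are illustrative, not from the card):

```
from transformers import pipeline

# Minimal sketch: summarize an English news article with the fine-tuned mT5 checkpoint.
summarizer = pipeline("summarization", model="ahmeddbahaa/mT5_multilingual_XLSum-finetuned-en-cnn")

article = "(any English news article)"
summary = summarizer(article, max_length=80, min_length=20, do_sample=False)
print(summary[0]["summary_text"])
```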
hossay/distilbert-base-uncased-finetuned-ner
965bf2583488faa9ec90335b11bb5af7e655dcb9
2022-07-13T13:32:51.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
hossay
null
hossay/distilbert-base-uncased-finetuned-ner
10
null
transformers
11,903
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-ner
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: conll2003
      type: conll2003
      args: conll2003
    metrics:
    - name: Precision
      type: precision
      value: 0.9263064854712186
    - name: Recall
      type: recall
      value: 0.9379125181787672
    - name: F1
      type: f1
      value: 0.9320733740967203
    - name: Accuracy
      type: accuracy
      value: 0.9838117781625813
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-ner

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0614
- Precision: 0.9263
- Recall: 0.9379
- F1: 0.9321
- Accuracy: 0.9838

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.2418 | 1.0 | 878 | 0.0709 | 0.9168 | 0.9242 | 0.9204 | 0.9806 |
| 0.0514 | 2.0 | 1756 | 0.0622 | 0.9175 | 0.9338 | 0.9255 | 0.9826 |
| 0.0306 | 3.0 | 2634 | 0.0614 | 0.9263 | 0.9379 | 0.9321 | 0.9838 |

### Framework versions

- Transformers 4.19.3
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
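As with the card above, usage is left unspecified; a minimal NER sketch, assuming the standard transformers token-classification pipeline:

```
from transformers import pipeline

# Minimal sketch: CoNLL-2003-style NER; aggregation merges word pieces into whole entities.
ner = pipeline(
    "token-classification",
    model="hossay/distilbert-base-uncased-finetuned-ner",
    aggregation_strategy="simple",
)

for entity in ner("Hugging Face is based in New York City."):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```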
ghpkishore/distilbert-base-uncased-finetuned-emotion
e73e8fc09e8234b3b82021ff51ffa300c22f95e2
2022-07-22T10:09:57.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:emotion", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
ghpkishore
null
ghpkishore/distilbert-base-uncased-finetuned-emotion
10
null
transformers
11,904
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- emotion
metrics:
- accuracy
- f1
model-index:
- name: distilbert-base-uncased-finetuned-emotion
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: emotion
      type: emotion
      args: default
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9285
    - name: F1
      type: f1
      value: 0.9285439912301902
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-emotion

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2183
- Accuracy: 0.9285
- F1: 0.9285

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.8381 | 1.0 | 250 | 0.3165 | 0.9075 | 0.9040 |
| 0.2524 | 2.0 | 500 | 0.2183 | 0.9285 | 0.9285 |

### Framework versions

- Transformers 4.19.3
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
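A minimal classification sketch for the card above, again assuming the standard transformers pipeline (the example sentence is illustrative):

```
from transformers import pipeline

# Minimal sketch: classify a sentence into the emotion dataset's labels.
classifier = pipeline("text-classification", model="ghpkishore/distilbert-base-uncased-finetuned-emotion")

print(classifier("I am thrilled with these results!"))
# e.g. [{"label": "...", "score": ...}] -- the top predicted emotion and its score
```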
mmillet/distilrubert-tiny-cased-conversational-v1_single_finetuned_on_cedr_augmented
7f7f00822d1ff99e71e01bc674b06b127db040c2
2022-06-10T20:27:38.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
mmillet
null
mmillet/distilrubert-tiny-cased-conversational-v1_single_finetuned_on_cedr_augmented
10
null
transformers
11,905
---
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
- precision
- recall
model-index:
- name: distilrubert-tiny-cased-conversational-v1_single_finetuned_on_cedr_augmented
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilrubert-tiny-cased-conversational-v1_single_finetuned_on_cedr_augmented

This model is a fine-tuned version of [DeepPavlov/distilrubert-tiny-cased-conversational-v1](https://huggingface.co/DeepPavlov/distilrubert-tiny-cased-conversational-v1) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5908
- Accuracy: 0.8653
- F1: 0.8656
- Precision: 0.8665
- Recall: 0.8653

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
- lr_scheduler_type: linear
- num_epochs: 20

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|
| 0.9172 | 1.0 | 69 | 0.5124 | 0.8246 | 0.8220 | 0.8271 | 0.8246 |
| 0.4709 | 2.0 | 138 | 0.4279 | 0.8528 | 0.8505 | 0.8588 | 0.8528 |
| 0.3194 | 3.0 | 207 | 0.3770 | 0.8737 | 0.8727 | 0.8740 | 0.8737 |
| 0.2459 | 4.0 | 276 | 0.3951 | 0.8685 | 0.8682 | 0.8692 | 0.8685 |
| 0.1824 | 5.0 | 345 | 0.4005 | 0.8831 | 0.8834 | 0.8841 | 0.8831 |
| 0.1515 | 6.0 | 414 | 0.4356 | 0.8800 | 0.8797 | 0.8801 | 0.8800 |
| 0.1274 | 7.0 | 483 | 0.4642 | 0.8727 | 0.8726 | 0.8731 | 0.8727 |
| 0.0833 | 8.0 | 552 | 0.5226 | 0.8633 | 0.8627 | 0.8631 | 0.8633 |
| 0.073 | 9.0 | 621 | 0.5327 | 0.8695 | 0.8686 | 0.8692 | 0.8695 |
| 0.0575 | 10.0 | 690 | 0.5908 | 0.8653 | 0.8656 | 0.8665 | 0.8653 |

### Framework versions

- Transformers 4.19.3
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
sasuke/bert-base-uncased-finetuned-sst2
68b034e693b214a4d0d89c3e24e13b120f69c869
2022-06-16T03:58:09.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
sasuke
null
sasuke/bert-base-uncased-finetuned-sst2
10
null
transformers
11,906
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: bert-base-uncased-finetuned-sst2
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: glue
      type: glue
      args: sst2
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9323394495412844
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-base-uncased-finetuned-sst2

This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2982
- Accuracy: 0.9323

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 0.1817 | 1.0 | 4210 | 0.2920 | 0.9186 |
| 0.1297 | 2.0 | 8420 | 0.3069 | 0.9209 |
| 0.0978 | 3.0 | 12630 | 0.2982 | 0.9323 |
| 0.062 | 4.0 | 16840 | 0.3278 | 0.9312 |
| 0.0303 | 5.0 | 21050 | 0.3642 | 0.9323 |

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0
- Datasets 2.2.2
- Tokenizers 0.12.1
orkg/orkgnlp-tdm-extraction
733949b6dda81ceeb35dfb22db9490c90d68bcc8
2022-06-13T16:00:47.000Z
[ "pytorch", "xlnet", "text-classification", "transformers", "license:mit" ]
text-classification
false
orkg
null
orkg/orkgnlp-tdm-extraction
10
null
transformers
11,907
---
license: mit
---

This repository includes the files required to run the `TDM Extraction` ORKG-NLP service. Please check [this article](https://orkg-nlp-pypi.readthedocs.io/en/latest/services/services.html) for more details about the service.
Deborah/bertimbau-finetuned-pos-accelerate2
1cbebcaa02c5322df8b2cd20bd8c3c30d88b138b
2022-06-13T13:21:47.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Deborah
null
Deborah/bertimbau-finetuned-pos-accelerate2
10
null
transformers
11,908
Entry not found
ghadeermobasher/BioNLP13-Modified-PubMedBERT-384
ba9c741c2983a8819bed47b55025652666238c92
2022-06-13T21:45:39.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioNLP13-Modified-PubMedBERT-384
10
null
transformers
11,909
Entry not found
ghadeermobasher/BC5CDR-Chem-Original-SciBERT-512
e931ebb74243f4debc1e1c3808e7a74978a8a105
2022-06-14T00:13:07.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BC5CDR-Chem-Original-SciBERT-512
10
null
transformers
11,910
Entry not found
ghadeermobasher/BC5CDR-Chem-Original-BlueBERT-512
49515824527c4c1d8ba3273085046c16621d8480
2022-06-14T00:22:24.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BC5CDR-Chem-Original-BlueBERT-512
10
null
transformers
11,911
Entry not found
ghadeermobasher/BC5CDR-Chem-Original-BioBERT-512
bcadd56d47e3356db8056c3020fc1dd7a75afa84
2022-06-14T00:24:48.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BC5CDR-Chem-Original-BioBERT-512
10
null
transformers
11,912
Entry not found
ahmeddbahaa/xlmroberta2xlmroberta-finetune-summarization-ar
c872b9ce57c13fc105b6929c6017587a21ebe69a
2022-06-14T16:05:58.000Z
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xlsum", "transformers", "summarization", "ar", "xlm-roberta", "Abstractive Summarization", "roberta", "generated_from_trainer", "model-index", "autotrain_compatible" ]
summarization
false
ahmeddbahaa
null
ahmeddbahaa/xlmroberta2xlmroberta-finetune-summarization-ar
10
null
transformers
11,913
---
tags:
- summarization
- ar
- encoder-decoder
- xlm-roberta
- Abstractive Summarization
- roberta
- generated_from_trainer
datasets:
- xlsum
model-index:
- name: xlmroberta2xlmroberta-finetune-summarization-ar
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlmroberta2xlmroberta-finetune-summarization-ar

This model is a fine-tuned version of [](https://huggingface.co/) on the xlsum dataset.
It achieves the following results on the evaluation set:
- Loss: 4.1298
- Rouge-1: 21.69
- Rouge-2: 8.73
- Rouge-l: 19.52
- Gen Len: 19.96
- Bertscore: 71.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 250
- num_epochs: 10
- label_smoothing_factor: 0.1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge-1 | Rouge-2 | Rouge-l | Gen Len | Bertscore |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:-------:|:---------:|
| 8.0645 | 1.0 | 1172 | 7.3567 | 8.22 | 0.66 | 7.94 | 20.0 | 58.18 |
| 7.2042 | 2.0 | 2344 | 6.6058 | 12.12 | 2.19 | 11.4 | 20.0 | 63.24 |
| 6.4168 | 3.0 | 3516 | 5.8784 | 16.46 | 4.31 | 15.15 | 20.0 | 66.3 |
| 5.4622 | 4.0 | 4688 | 4.7931 | 17.6 | 5.87 | 15.9 | 19.99 | 69.21 |
| 4.7829 | 5.0 | 5860 | 4.4418 | 19.17 | 6.74 | 17.22 | 19.98 | 70.23 |
| 4.4829 | 6.0 | 7032 | 4.2950 | 19.8 | 7.11 | 17.74 | 19.98 | 70.38 |
| 4.304 | 7.0 | 8204 | 4.2155 | 20.71 | 7.59 | 18.56 | 19.98 | 70.66 |
| 4.1778 | 8.0 | 9376 | 4.1632 | 21.1 | 7.94 | 18.99 | 19.98 | 70.86 |
| 4.0886 | 9.0 | 10548 | 4.1346 | 21.44 | 8.03 | 19.28 | 19.98 | 70.93 |
| 4.0294 | 10.0 | 11720 | 4.1298 | 21.51 | 8.14 | 19.33 | 19.98 | 71.02 |

### Framework versions

- Transformers 4.19.4
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
erickfm/true-sweep-1
c3a167d88698b45c5fecd651fc122eab31f8603c
2022-06-15T03:51:23.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/true-sweep-1
10
1
transformers
11,914
Entry not found
hossay/biobert-base-cased-v1.2-finetuned-ner
55ed1aa19bd4d757ca31bf868ff82b86c7687047
2022-06-15T07:38:51.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "dataset:ncbi_disease", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
token-classification
false
hossay
null
hossay/biobert-base-cased-v1.2-finetuned-ner
10
null
transformers
11,915
---
tags:
- generated_from_trainer
datasets:
- ncbi_disease
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: biobert-base-cased-v1.2-finetuned-ner
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: ncbi_disease
      type: ncbi_disease
      args: ncbi_disease
    metrics:
    - name: Precision
      type: precision
      value: 0.8396334478808706
    - name: Recall
      type: recall
      value: 0.8731387730792138
    - name: F1
      type: f1
      value: 0.856058394160584
    - name: Accuracy
      type: accuracy
      value: 0.9824805769647444
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# biobert-base-cased-v1.2-finetuned-ner

This model is a fine-tuned version of [dmis-lab/biobert-base-cased-v1.2](https://huggingface.co/dmis-lab/biobert-base-cased-v1.2) on the ncbi_disease dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0706
- Precision: 0.8396
- Recall: 0.8731
- F1: 0.8561
- Accuracy: 0.9825

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 340 | 0.0691 | 0.8190 | 0.7868 | 0.8026 | 0.9777 |
| 0.101 | 2.0 | 680 | 0.0700 | 0.8334 | 0.8553 | 0.8442 | 0.9807 |
| 0.0244 | 3.0 | 1020 | 0.0706 | 0.8396 | 0.8731 | 0.8561 | 0.9825 |

### Framework versions

- Transformers 4.19.4
- Pytorch 1.11.0+cu113
- Datasets 2.3.0
- Tokenizers 0.12.1
AnyaSchen/rugpt3_tyutchev
015f7ab0d83bd9eff4ce5bf7a92ef7d6d5009e0d
2022-06-15T11:33:16.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
AnyaSchen
null
AnyaSchen/rugpt3_tyutchev
10
null
transformers
11,916
This model is a fine-tuned ruGPT-3 medium model, tuned to the style of Tyutchev's poetry in Russian. You can give it a word, a phrase, or just an empty line as input, and it will generate a poem in the style of Tyutchev.

![alt text](https://lh4.googleusercontent.com/1B05-wqyj_8gI6zTues5f7a1epqkJ5FW672q3ReHCQ-d3qS0pIrKBIEyX2feWb66Y4Y=w2400)
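A minimal generation sketch for this card, assuming the standard transformers text-generation pipeline (the prompt and sampling settings are illustrative assumptions, not from the card):

```
from transformers import pipeline

# Minimal sketch: generate a Tyutchev-style poem from a short Russian prompt.
generator = pipeline("text-generation", model="AnyaSchen/rugpt3_tyutchev")

poem = generator(
    "Весна",            # per the card, a word, a phrase, or an empty line all work
    max_length=100,     # illustrative sampling settings
    do_sample=True,
    top_p=0.95,
)
print(poem[0]["generated_text"])
```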
ghadeermobasher/BC5CDR-Chem-Original-BioBERT-384
5ada33335ad7eee029a4e76b61c5176479387bd0
2022-06-15T13:01:09.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BC5CDR-Chem-Original-BioBERT-384
10
null
transformers
11,917
Entry not found
adamlin/question-paraphraser
e7eff5db2f0a792d680c1cd41fc30282862cd32c
2022-06-16T00:35:09.000Z
[ "pytorch", "mt5", "text2text-generation", "dataset:adamlin/question_augmentation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
adamlin
null
adamlin/question-paraphraser
10
null
transformers
11,918
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- adamlin/question_augmentation
metrics:
- rouge
model-index:
- name: question-paraphraser
  results:
  - task:
      name: Summarization
      type: summarization
    dataset:
      name: adamlin/question_augmentation
      type: adamlin/question_augmentation
    metrics:
    - name: Rouge1
      type: rouge
      value: 0.5385
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# question-paraphraser

This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the adamlin/question_augmentation dataset.
It achieves the following results on the evaluation set:
- Loss: 3.5901
- Rouge1: 0.5385
- Rouge2: 0.0769
- Rougel: 0.5586
- Rougelsum: 0.5586
- Gen Len: 7.6712

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0

### Training results

### Framework versions

- Transformers 4.19.4
- Pytorch 1.11.0
- Datasets 2.3.2
- Tokenizers 0.12.1
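The card above does not show how to call the model; a minimal seq2seq sketch under the assumption that the input is a plain question with no special prompt prefix (decoding settings are illustrative):

```
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Minimal sketch: paraphrase a question with the fine-tuned mT5 checkpoint.
tokenizer = AutoTokenizer.from_pretrained("adamlin/question-paraphraser")
model = AutoModelForSeq2SeqLM.from_pretrained("adamlin/question-paraphraser")

inputs = tokenizer("How tall is the Eiffel Tower?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```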
Afework/t5_boolq
db94925977ed39c03432ff5d76bf21c9c42ca221
2022-06-16T16:23:35.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Afework
null
Afework/t5_boolq
10
null
transformers
11,919
Entry not found
anantoj/T5-summarizer-simple-wiki
8e211ba089abee6549308df8745a97f23f434ea7
2022-06-16T10:47:42.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
anantoj
null
anantoj/T5-summarizer-simple-wiki
10
null
transformers
11,920
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: results
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# results

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 2.0868

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.2583 | 1.0 | 14719 | 2.1164 |
| 2.2649 | 2.0 | 29438 | 2.0925 |
| 2.209 | 3.0 | 44157 | 2.0868 |

### Framework versions

- Transformers 4.19.4
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
Afework/t5-mcq
49027311979b5ef9fc6fdfcf807ec8176fb6d711
2022-06-16T18:52:15.000Z
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Afework
null
Afework/t5-mcq
10
null
transformers
11,921
Entry not found
mariolinml/roberta-large-finetuned-chunking
b1422508caa99472f24359dc96b0f6b611e5303f
2022-06-18T20:09:57.000Z
[ "pytorch", "bert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
mariolinml
null
mariolinml/roberta-large-finetuned-chunking
10
null
transformers
11,922
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: roberta-large-finetuned-chunking
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-finetuned-chunking

This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4192
- Precision: 0.3222
- Recall: 0.3161
- F1: 0.3191
- Accuracy: 0.8632

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 6

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0373 | 1.0 | 2498 | 0.9545 | 0.3166 | 0.2545 | 0.2822 | 0.8656 |
| 0.0045 | 2.0 | 4996 | 1.1324 | 0.2667 | 0.3142 | 0.2885 | 0.8525 |
| 0.0022 | 3.0 | 7494 | 1.3138 | 0.3349 | 0.3097 | 0.3218 | 0.8672 |
| 0.0015 | 4.0 | 9992 | 1.3454 | 0.3261 | 0.3260 | 0.3260 | 0.8647 |
| 0.0014 | 5.0 | 12490 | 1.3640 | 0.3064 | 0.3126 | 0.3095 | 0.8603 |
| 0.0008 | 6.0 | 14988 | 1.4192 | 0.3222 | 0.3161 | 0.3191 | 0.8632 |

### Framework versions

- Transformers 4.20.0
- Pytorch 1.11.0
- Datasets 2.3.2
- Tokenizers 0.12.1
camilag/bertimbau-finetuned-pos-accelerate-5
39c7908b15dc5ad877ed20319671f1b349cad0f7
2022-06-19T02:24:11.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
camilag
null
camilag/bertimbau-finetuned-pos-accelerate-5
10
null
transformers
11,923
Entry not found
camilag/bertimbau-finetuned-pos-accelerate-6
87e70efc2e3be25947e5cdb2b3ad06f25457fc1c
2022-06-20T23:14:00.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
camilag
null
camilag/bertimbau-finetuned-pos-accelerate-6
10
null
transformers
11,924
Entry not found
scjones/distilbert-base-uncased-finetuned-emotion
f872858c1ae527b2c7a4f6fe1eaeb49fb6c7917f
2022-06-21T00:16:41.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:emotion", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
scjones
null
scjones/distilbert-base-uncased-finetuned-emotion
10
null
transformers
11,925
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- emotion
metrics:
- accuracy
- f1
model-index:
- name: distilbert-base-uncased-finetuned-emotion
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: emotion
      type: emotion
      args: default
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9315
    - name: F1
      type: f1
      value: 0.9317528216385311
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-emotion

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1630
- Accuracy: 0.9315
- F1: 0.9318

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.2115 | 1.0 | 250 | 0.1696 | 0.93 | 0.9295 |
| 0.1376 | 2.0 | 500 | 0.1630 | 0.9315 | 0.9318 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
SISLab/amc-opt-msmd
5373bac5bc2f17905e0a4c51edfe772109220cf1
2022-06-21T11:43:57.000Z
[ "pytorch", "bert", "it", "transformers", "text-classification", "sentiment-analysis" ]
text-classification
false
SISLab
null
SISLab/amc-opt-msmd
10
null
transformers
11,926
---
tags:
- text-classification
- sentiment-analysis
language:
- "it"
---
kktoto/tiny_focal_ckpt
30688a382ec3f287363f3b2976d2466c5d335bf8
2022-06-21T15:05:00.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
token-classification
false
kktoto
null
kktoto/tiny_focal_ckpt
10
null
transformers
11,927
---
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: tiny_focal_ckpt
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# tiny_focal_ckpt

This model was trained from scratch on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0561
- Precision: 0.6529
- Recall: 0.6366
- F1: 0.6446
- Accuracy: 0.9516

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.058 | 1.0 | 5561 | 0.0583 | 0.6327 | 0.5945 | 0.6130 | 0.9484 |
| 0.0566 | 2.0 | 11122 | 0.0570 | 0.6401 | 0.5985 | 0.6186 | 0.9492 |
| 0.0564 | 3.0 | 16683 | 0.0567 | 0.6364 | 0.6241 | 0.6302 | 0.9496 |
| 0.053 | 4.0 | 22244 | 0.0561 | 0.6416 | 0.6312 | 0.6364 | 0.9503 |
| 0.052 | 5.0 | 27805 | 0.0558 | 0.6501 | 0.6239 | 0.6367 | 0.9510 |
| 0.0507 | 6.0 | 33366 | 0.0555 | 0.6555 | 0.6208 | 0.6377 | 0.9514 |
| 0.0497 | 7.0 | 38927 | 0.0552 | 0.6559 | 0.6256 | 0.6404 | 0.9515 |
| 0.0485 | 8.0 | 44488 | 0.0561 | 0.6485 | 0.6397 | 0.6440 | 0.9513 |
| 0.0481 | 9.0 | 50049 | 0.0558 | 0.6531 | 0.6344 | 0.6436 | 0.9515 |
| 0.0469 | 10.0 | 55610 | 0.0561 | 0.6529 | 0.6366 | 0.6446 | 0.9516 |

### Framework versions

- Transformers 4.20.0
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
Jeevesh8/std_0pnt2_bert_ft_cola-35
35dd3a62eee243aa1b92c8f499888473f3491b8e
2022-06-21T13:27:41.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_0pnt2_bert_ft_cola-35
10
null
transformers
11,928
Entry not found
Jeevesh8/std_0pnt2_bert_ft_cola-18
19e6a8ec6913e87c16066f347e0812f46cc22cdf
2022-06-21T13:30:08.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_0pnt2_bert_ft_cola-18
10
null
transformers
11,929
Entry not found
Jeevesh8/std_0pnt2_bert_ft_cola-26
d7189315c8182cad18a7839d73b79e6e45b88388
2022-06-21T13:28:10.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_0pnt2_bert_ft_cola-26
10
null
transformers
11,930
Entry not found
Jeevesh8/std_0pnt2_bert_ft_cola-21
b9170bc88e91639025982b7d11d176230909fad5
2022-06-21T13:28:22.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_0pnt2_bert_ft_cola-21
10
null
transformers
11,931
Entry not found
Jeevesh8/std_0pnt2_bert_ft_cola-9
d061aac8cd1be45073ca1f788d599e515ae7e35d
2022-06-21T13:31:10.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_0pnt2_bert_ft_cola-9
10
null
transformers
11,932
Entry not found
Jeevesh8/std_0pnt2_bert_ft_cola-27
9663f06dabf0df15da178dadd13cd4196819953b
2022-06-21T13:28:11.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_0pnt2_bert_ft_cola-27
10
null
transformers
11,933
Entry not found
Jeevesh8/std_0pnt2_bert_ft_cola-8
7e161c58e2bac0432ace20a33b94c92c83424f0b
2022-06-21T13:30:49.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/std_0pnt2_bert_ft_cola-8
10
null
transformers
11,934
Entry not found
Sayan01/tiny-bert-mrpc-distilled
a6848fc64ab85be64b0b18b25d0b541e67c8027a
2022-07-15T19:32:31.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Sayan01
null
Sayan01/tiny-bert-mrpc-distilled
10
null
transformers
11,935
Entry not found
deepesh0x/autotrain-GlueFineTunedModel-1013533786
0a640f183640259de3e9460a674e34f186b3c468
2022-06-21T18:05:40.000Z
[ "pytorch", "bert", "text-classification", "unk", "dataset:deepesh0x/autotrain-data-GlueFineTunedModel", "transformers", "autotrain", "co2_eq_emissions" ]
text-classification
false
deepesh0x
null
deepesh0x/autotrain-GlueFineTunedModel-1013533786
10
1
transformers
11,936
---
tags: autotrain
language: unk
widget:
- text: "I love AutoTrain 🤗"
datasets:
- deepesh0x/autotrain-data-GlueFineTunedModel
co2_eq_emissions: 57.79463560530838
---

# Model Trained Using AutoTrain

- Problem type: Binary Classification
- Model ID: 1013533786
- CO2 Emissions (in grams): 57.79463560530838

## Validation Metrics

- Loss: 0.18257243931293488
- Accuracy: 0.9261538461538461
- Precision: 0.9244319632371713
- Recall: 0.9282235324275827
- AUC: 0.9800523984255356
- F1: 0.92632386799693

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/deepesh0x/autotrain-GlueFineTunedModel-1013533786
```

Or Python API:

```
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("deepesh0x/autotrain-GlueFineTunedModel-1013533786", use_auth_token=True)

tokenizer = AutoTokenizer.from_pretrained("deepesh0x/autotrain-GlueFineTunedModel-1013533786", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")

outputs = model(**inputs)
```
camilag/bertimbau-finetuned-pos-accelerate-7
563bd6b793aeca222dbd8f73881c4c1e1c8c545b
2022-06-21T21:36:12.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
camilag
null
camilag/bertimbau-finetuned-pos-accelerate-7
10
null
transformers
11,937
Entry not found
bsenker/autotrain-sentanaly-1016134101
30617ddcf51d1485037cbb5380d45ae3fc0c3bd4
2022-06-22T03:34:19.000Z
[ "pytorch", "bert", "text-classification", "tr", "dataset:bsenker/autotrain-data-sentanaly", "transformers", "autotrain", "co2_eq_emissions" ]
text-classification
false
bsenker
null
bsenker/autotrain-sentanaly-1016134101
10
null
transformers
11,938
---
tags: autotrain
language: tr
widget:
- text: "I love AutoTrain 🤗"
datasets:
- bsenker/autotrain-data-sentanaly
co2_eq_emissions: 2.4274113973426568
---

# Model Trained Using AutoTrain

- Problem type: Multi-class Classification
- Model ID: 1016134101
- CO2 Emissions (in grams): 2.4274113973426568

## Validation Metrics

- Loss: 0.8357052803039551
- Accuracy: 0.6425438596491229
- Macro F1: 0.6449751139113629
- Micro F1: 0.6425438596491229
- Weighted F1: 0.644975113911363
- Macro Precision: 0.6642782595845687
- Micro Precision: 0.6425438596491229
- Weighted Precision: 0.6642782595845685
- Macro Recall: 0.6425438596491229
- Micro Recall: 0.6425438596491229
- Weighted Recall: 0.6425438596491229

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/bsenker/autotrain-sentanaly-1016134101
```

Or Python API:

```
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("bsenker/autotrain-sentanaly-1016134101", use_auth_token=True)

tokenizer = AutoTokenizer.from_pretrained("bsenker/autotrain-sentanaly-1016134101", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")

outputs = model(**inputs)
```
upsalite/bert-base-german-cased-finetuned-emotion-14-labels
90110bd5b5f4d790a45491e6c969109e64e6f0ba
2022-06-23T06:25:20.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
upsalite
null
upsalite/bert-base-german-cased-finetuned-emotion-14-labels
10
null
transformers
11,939
---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: bert-base-german-cased-finetuned-emotion-14-labels
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-base-german-cased-finetuned-emotion-14-labels

This model is a fine-tuned version of [bert-base-german-cased](https://huggingface.co/bert-base-german-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1072
- Accuracy: 0.7304
- F1: 0.7302

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 1.9084 | 1.0 | 128 | 1.3889 | 0.5735 | 0.5631 |
| 1.0961 | 2.0 | 256 | 1.0875 | 0.6422 | 0.6379 |
| 0.7211 | 3.0 | 384 | 0.9900 | 0.6873 | 0.6859 |
| 0.4556 | 4.0 | 512 | 0.9495 | 0.7137 | 0.7166 |
| 0.2916 | 5.0 | 640 | 0.9807 | 0.7069 | 0.7054 |
| 0.1784 | 6.0 | 768 | 0.9956 | 0.7196 | 0.7199 |
| 0.1134 | 7.0 | 896 | 1.0471 | 0.7167 | 0.7169 |
| 0.0759 | 8.0 | 1024 | 1.0822 | 0.7235 | 0.7225 |
| 0.0502 | 9.0 | 1152 | 1.1048 | 0.7157 | 0.7173 |
| 0.041 | 10.0 | 1280 | 1.1072 | 0.7304 | 0.7302 |

### Framework versions

- Transformers 4.19.0
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.12.1
Zamachi/distillbert-for-multilabel-sentence-classification
be57db01255f3bfc7664131c2893ba34af4d0f59
2022-07-12T02:13:34.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
Zamachi
null
Zamachi/distillbert-for-multilabel-sentence-classification
10
null
transformers
11,940
Entry not found
truongxl/NER_SucKhoe
7ece9ef590f6b30b695fb3e663a0dd65fbbef07a
2022-06-23T07:46:38.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
truongxl
null
truongxl/NER_SucKhoe
10
null
transformers
11,941
Entry not found
alk/distilbert-base-uncased-finetuned-header-classifier
02408556170bfdd599a2e73e71810ac2fe285779
2022-06-24T15:26:42.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
alk
null
alk/distilbert-base-uncased-finetuned-header-classifier
10
null
transformers
11,942
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-header-classifier
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-header-classifier

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
emekaboris/codetrans_t5_small_mt_ft_git_diff
6e407c1cf7fb9b3f9396303f1b35b294a2e91850
2022-06-26T17:12:55.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
emekaboris
null
emekaboris/codetrans_t5_small_mt_ft_git_diff
10
null
transformers
11,943
Entry not found
alk/roberta-large-mnli-finetuned-header-classifier
e840b52be3c00cf00916111ae6f4a76d5a33649c
2022-06-28T00:28:10.000Z
[ "pytorch", "tensorboard", "roberta", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
alk
null
alk/roberta-large-mnli-finetuned-header-classifier
10
null
transformers
11,944
---
license: mit
tags:
- generated_from_trainer
model-index:
- name: roberta-large-mnli-finetuned-header-classifier
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-mnli-finetuned-header-classifier

This model is a fine-tuned version of [roberta-large-mnli](https://huggingface.co/roberta-large-mnli) on the None dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
Yanjie/message-intent-220628
130430f94cb316e11cce0c24b2c4b35493ff88fe
2022-06-28T18:18:11.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
Yanjie
null
Yanjie/message-intent-220628
10
null
transformers
11,945
Entry not found
elliotthwang/mt5-small-finetuned-tradition-zh
fd8a3eacff61a45548bd7948b9bbe359ffd6ad7a
2022-07-18T16:44:21.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "dataset:xlsum", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
elliotthwang
null
elliotthwang/mt5-small-finetuned-tradition-zh
10
null
transformers
11,946
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- xlsum
metrics:
- rouge
model-index:
- name: mt5-small-finetuned-tradition-zh
  results:
  - task:
      name: Sequence-to-sequence Language Modeling
      type: text2text-generation
    dataset:
      name: xlsum
      type: xlsum
      args: chinese_traditional
    metrics:
    - name: Rouge1
      type: rouge
      value: 5.7806
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mt5-small-finetuned-tradition-zh

This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the xlsum dataset.
It achieves the following results on the evaluation set:
- Loss: 2.9218
- Rouge1: 5.7806
- Rouge2: 1.266
- Rougel: 5.761
- Rougelsum: 5.7833

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5.6e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 6

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum |
|:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:------:|:---------:|
| 4.542 | 1.0 | 2336 | 3.1979 | 4.8334 | 1.025 | 4.8142 | 4.8326 |
| 3.7542 | 2.0 | 4672 | 3.0662 | 5.2155 | 1.0978 | 5.2025 | 5.2158 |
| 3.5706 | 3.0 | 7008 | 3.0070 | 5.5471 | 1.3397 | 5.5386 | 5.5391 |
| 3.4668 | 4.0 | 9344 | 2.9537 | 5.5865 | 1.1558 | 5.5816 | 5.5964 |
| 3.4082 | 5.0 | 11680 | 2.9391 | 5.8061 | 1.3462 | 5.7944 | 5.812 |
| 3.375 | 6.0 | 14016 | 2.9218 | 5.7806 | 1.266 | 5.761 | 5.7833 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
Jeevesh8/goog_bert_ft_cola-7
66a8aa4e6d06b54bd1a491789379f50fddf18b9d
2022-06-29T17:31:48.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-7
10
null
transformers
11,947
Entry not found
Jeevesh8/goog_bert_ft_cola-6
1a68ef290e72f49843b9150bdcb76b2d0d62e9d6
2022-06-29T17:31:38.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-6
10
null
transformers
11,948
Entry not found
Jeevesh8/goog_bert_ft_cola-8
f1070a84051eb63412d0e33cae466237b3c33717
2022-06-29T17:32:13.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-8
10
null
transformers
11,949
Entry not found
Jeevesh8/goog_bert_ft_cola-10
87d3d8fa6981ea8e3328010d2355c619a2b64d69
2022-06-29T17:32:18.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-10
10
null
transformers
11,950
Entry not found
Jeevesh8/goog_bert_ft_cola-9
9e2e9c03f07bce387581536cf8df69cfbd52c75b
2022-06-29T17:32:17.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-9
10
null
transformers
11,951
Entry not found
Jeevesh8/goog_bert_ft_cola-12
596d4a373bde6d85b424569ab704cb1ba986e6ac
2022-06-29T17:33:27.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-12
10
null
transformers
11,952
Entry not found
Jeevesh8/goog_bert_ft_cola-13
f5cb60504d27224fa472e31d31f152c5049f19f2
2022-06-29T17:33:33.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-13
10
null
transformers
11,953
Entry not found
Jeevesh8/goog_bert_ft_cola-17
8ef6b9dc80d167b6be50878015d7e758dfc989a1
2022-06-29T17:37:37.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-17
10
null
transformers
11,954
Entry not found
Jeevesh8/goog_bert_ft_cola-14
dd45bf27f031e4f5b7585576d55d12c55de43dda
2022-06-29T17:33:41.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-14
10
null
transformers
11,955
Entry not found
Jeevesh8/goog_bert_ft_cola-18
dbb7db9c5b2849188c246b8f5b75f705338d4979
2022-06-29T17:33:45.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-18
10
null
transformers
11,956
Entry not found
Jeevesh8/goog_bert_ft_cola-11
bb316f001706a9fbca697468b6d25838c084ca59
2022-06-29T17:37:53.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-11
10
null
transformers
11,957
Entry not found
Jeevesh8/goog_bert_ft_cola-15
29dc5ef3c279e61bbc1d1a2610f1cd580cfbfe51
2022-06-29T17:33:44.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-15
10
null
transformers
11,958
Entry not found
Jeevesh8/goog_bert_ft_cola-19
5c6a378347af81cdbe7a47b10faff392b3229d6e
2022-06-29T17:33:04.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-19
10
null
transformers
11,959
Entry not found
Jeevesh8/goog_bert_ft_cola-25
7fa43f20fdc97fbd165020530f36f1641e6f928d
2022-06-29T17:33:47.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-25
10
null
transformers
11,960
Entry not found
Jeevesh8/goog_bert_ft_cola-27
a2435f85c89342fccf6c0e4e8813cf1478fa3247
2022-06-29T17:33:52.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-27
10
null
transformers
11,961
Entry not found
Jeevesh8/goog_bert_ft_cola-26
20d68c14493b5373f1a535e888c8eb6557b4514f
2022-06-29T17:34:01.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-26
10
null
transformers
11,962
Entry not found
Jeevesh8/goog_bert_ft_cola-44
305b4d1e8e1290ed0168f8ee061e77b29271ae63
2022-06-29T17:34:06.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-44
10
null
transformers
11,963
Entry not found
Jeevesh8/goog_bert_ft_cola-23
605ac934ddc1f09791cf72ed45dac88cda4f00de
2022-06-29T17:33:06.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-23
10
null
transformers
11,964
Entry not found
Jeevesh8/goog_bert_ft_cola-45
2e26c5fa1f40d305549fd6307187c85a83af03b7
2022-06-29T17:34:04.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-45
10
null
transformers
11,965
Entry not found
Jeevesh8/goog_bert_ft_cola-51
b37a7d3489fa1a382a90aa3d03d41c16aca7a0f8
2022-06-29T17:34:28.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-51
10
null
transformers
11,966
Entry not found
Jeevesh8/goog_bert_ft_cola-55
818acc260c90ccf72dbe52d3fd1f4e540b16f390
2022-06-29T17:34:26.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-55
10
null
transformers
11,967
Entry not found
Jeevesh8/goog_bert_ft_cola-65
58bced61e2f2086632303ddf408108bd1b45dc99
2022-06-29T17:35:34.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-65
10
null
transformers
11,968
Entry not found
Jeevesh8/goog_bert_ft_cola-61
c0c07f02e0b60486a31d87aa006f5e6a3a2c8761
2022-06-29T17:33:17.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-61
10
null
transformers
11,969
Entry not found
Jeevesh8/goog_bert_ft_cola-91
2cf88589e2b755bbb1e038a357db8150d15207e4
2022-06-29T17:34:06.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-91
10
null
transformers
11,970
Entry not found
Jeevesh8/goog_bert_ft_cola-89
74fbd5939f5977c5a11ad0595d272045dcb5f320
2022-06-29T17:35:51.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-89
10
null
transformers
11,971
Entry not found
Jeevesh8/goog_bert_ft_cola-90
df9c407117482542e78b8c053b4fcc6e9ea0c026
2022-06-29T17:35:55.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-90
10
null
transformers
11,972
Entry not found
Jeevesh8/goog_bert_ft_cola-88
226ea54a4932f2559f4b5b776378fe9665485ad6
2022-06-29T17:33:47.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-88
10
null
transformers
11,973
Entry not found
Jeevesh8/goog_bert_ft_cola-93
251cd09a7a88fbb99e332bc3e75dfa207be396d6
2022-06-29T17:35:52.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-93
10
null
transformers
11,974
Entry not found
Jeevesh8/goog_bert_ft_cola-85
eefb3b2225abbb0bee6981e301cfa8384da2be73
2022-06-29T17:34:00.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-85
10
null
transformers
11,975
Entry not found
Jeevesh8/goog_bert_ft_cola-99
edb78e09f80b4ea8a008a370635bc74f7386c197
2022-06-29T17:38:19.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-99
10
null
transformers
11,976
Entry not found
Jeevesh8/goog_bert_ft_cola-95
4b4bd5e1ffaab8fd01a70c6e2d1ac0c104994737
2022-06-29T17:36:03.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-95
10
null
transformers
11,977
Entry not found
Jeevesh8/goog_bert_ft_cola-98
1ba5408f6145fa7649ec325988018911d888f2ca
2022-06-29T17:38:32.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-98
10
null
transformers
11,978
Entry not found
Jeevesh8/goog_bert_ft_cola-94
0ba5e20f1461331e1076eeb66a1b27837cdd2ec3
2022-06-29T17:38:33.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-94
10
null
transformers
11,979
Entry not found
luffycodes/t5_small_v51
3b411e163d899e8136e375991b435d10363d29eb
2022-07-11T10:15:07.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
luffycodes
null
luffycodes/t5_small_v51
10
null
transformers
11,980
Entry not found
Luojike/autotrain-test_3-1071537591
edeced2fb9f06d276bf38d024f7ebebc64f6da04
2022-07-01T15:04:07.000Z
[ "pytorch", "bert", "text-classification", "unk", "dataset:Luojike/autotrain-data-test_3", "transformers", "autotrain", "co2_eq_emissions" ]
text-classification
false
Luojike
null
Luojike/autotrain-test_3-1071537591
10
null
transformers
11,981
--- tags: autotrain language: unk widget: - text: "I love AutoTrain 🤗" datasets: - Luojike/autotrain-data-test_3 co2_eq_emissions: 0.03985401798934018 --- # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1071537591 - CO2 Emissions (in grams): 0.03985401798934018 ## Validation Metrics - Loss: 0.5283975601196289 - Accuracy: 0.7389705882352942 - Precision: 0.5032894736842105 - Recall: 0.3574766355140187 - AUC: 0.7135599403856304 - F1: 0.41803278688524587 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/Luojike/autotrain-test_3-1071537591 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("Luojike/autotrain-test_3-1071537591", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("Luojike/autotrain-test_3-1071537591", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
amasi/wikineural-multilingual-ner
88a0bd54b5a1594a9972ba3c063057784678e957
2022-07-03T19:40:00.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
amasi
null
amasi/wikineural-multilingual-ner
10
null
transformers
11,982
Entry not found
Kayvane/distilbert-complaints-wandb-product
b362343b328d34dc7ee6a41a51ee6f76a6bc4085
2022-07-04T10:52:27.000Z
[ "pytorch", "distilbert", "text-classification", "dataset:consumer-finance-complaints", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
Kayvane
null
Kayvane/distilbert-complaints-wandb-product
10
null
transformers
11,983
--- license: apache-2.0 tags: - generated_from_trainer datasets: - consumer-finance-complaints metrics: - accuracy - f1 - recall - precision model-index: - name: distilbert-complaints-wandb-product results: - task: name: Text Classification type: text-classification dataset: name: consumer-finance-complaints type: consumer-finance-complaints args: default metrics: - name: Accuracy type: accuracy value: 0.8690996641956535 - name: F1 type: f1 value: 0.8645310918904254 - name: Recall type: recall value: 0.8690996641956535 - name: Precision type: precision value: 0.8629318199420283 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-complaints-wandb-product This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the consumer-finance-complaints dataset. It achieves the following results on the evaluation set: - Loss: 0.4431 - Accuracy: 0.8691 - F1: 0.8645 - Recall: 0.8691 - Precision: 0.8629 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.562 | 0.51 | 2000 | 0.5107 | 0.8452 | 0.8346 | 0.8452 | 0.8252 | | 0.4548 | 1.01 | 4000 | 0.4628 | 0.8565 | 0.8481 | 0.8565 | 0.8466 | | 0.3439 | 1.52 | 6000 | 0.4519 | 0.8605 | 0.8544 | 0.8605 | 0.8545 | | 0.2626 | 2.03 | 8000 | 0.4412 | 0.8678 | 0.8618 | 0.8678 | 0.8626 | | 0.2717 | 2.53 | 10000 | 0.4431 | 0.8691 | 0.8645 | 0.8691 | 0.8629 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
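The card above gives metrics and hyperparameters but no inference snippet; a minimal usage sketch with the standard `transformers` pipeline API (the example complaint text below is made up, and the predicted label names come from the id2label mapping stored in the model config):

```python
from transformers import pipeline

# Load the fine-tuned complaint classifier by the model ID from this record.
classifier = pipeline(
    "text-classification",
    model="Kayvane/distilbert-complaints-wandb-product",
)

# Classify a sample consumer complaint.
print(classifier("There is an account on my credit report that I never opened."))
```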
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-320-8
e89458523e371403321ab4e2e3bf163510768a4a
2022-07-04T13:20:01.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-320-8
10
null
transformers
11,984
Entry not found
ghadeermobasher/BioRed-Chem-Modified-PubMedBERT-320-8
cb4eadfec36514be8bede9ef665f56d8a6333466
2022-07-04T13:20:40.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-Chem-Modified-PubMedBERT-320-8
10
null
transformers
11,985
Entry not found
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-128-32
6dd0076c4c22bc154acbda14578d079ce6275df5
2022-07-04T13:25:34.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-128-32
10
null
transformers
11,986
Entry not found
sepidmnorozy/finetuned-sentiment-withGPU
dadb4575d330a784969b0335a36ba0ae38c3eee6
2022-07-04T14:01:11.000Z
[ "pytorch", "xlm-roberta", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
sepidmnorozy
null
sepidmnorozy/finetuned-sentiment-withGPU
10
null
transformers
11,987
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: finetuning-sentiment-model-10-samples_withGPU results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-10-samples_withGPU This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3893 - Accuracy: 0.8744 - F1: 0.8684 - Precision: 0.9126 - Recall: 0.8283 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:---------:|:------:| | 0.3631 | 1.0 | 7088 | 0.3622 | 0.8638 | 0.8519 | 0.9334 | 0.7835 | | 0.35 | 2.0 | 14176 | 0.3875 | 0.8714 | 0.8622 | 0.9289 | 0.8044 | | 0.3262 | 3.0 | 21264 | 0.3893 | 0.8744 | 0.8684 | 0.9126 | 0.8283 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.0 - Datasets 2.0.0 - Tokenizers 0.11.6
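No usage example is included above; a minimal sketch of the manual tokenizer-plus-model path (the input sentence is illustrative, and the label names depend on the id2label mapping saved with the checkpoint):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "sepidmnorozy/finetuned-sentiment-withGPU"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("The service was fast and friendly.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Map the highest-scoring class index back to its label name.
pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])
```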
Chirayu/subject-generator-t5-base
a84f4bf6aaf7103d762800b185a6edb5a559c173
2022-07-12T10:33:42.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Chirayu
null
Chirayu/subject-generator-t5-base
10
null
transformers
11,988
# What does this model do? This model generates a subject line for an email, given the full email body as input. It is a fine-tuned T5-Base. Here is how to use this model:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

model = AutoModelForSeq2SeqLM.from_pretrained("Chirayu/subject-generator-t5-base")
tokenizer = AutoTokenizer.from_pretrained("Chirayu/subject-generator-t5-base")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

def get_subject(content, num_beams=5, max_length=512, repetition_penalty=2.5, length_penalty=1, early_stopping=True, top_p=.95, top_k=50, num_return_sequences=3):
    text = "title: " + content + " </s>"
    input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
    input_ids = input_ids.to(device)
    generated_ids = model.generate(
        input_ids=input_ids,
        num_beams=num_beams,
        max_length=max_length,
        repetition_penalty=repetition_penalty,
        length_penalty=length_penalty,
        early_stopping=early_stopping,
        top_p=top_p,
        top_k=top_k,
        num_return_sequences=num_return_sequences,
    )
    subjects = [
        tokenizer.decode(generated_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        for generated_id in generated_ids
    ]
    return subjects
```
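The snippet above defines `get_subject` but never calls it; a short usage sketch with a made-up email body:

```python
# Hypothetical email body; any plain-text email works as input.
email = (
    "Hi team, the quarterly report is attached. Please review the revenue "
    "figures and send me your comments before Friday's meeting."
)

# Returns num_return_sequences candidate subject lines (three by default).
for subject in get_subject(email):
    print(subject)
```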
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-128-10
3ccf9e1c318233aed127262cf8ac8b2d72579ae8
2022-07-04T14:31:53.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-128-10
10
null
transformers
11,989
Entry not found
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-256-40
662503660a17570b86707f956c96d8492d292113
2022-07-05T12:14:57.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-256-40
10
null
transformers
11,990
Entry not found
domenicrosati/deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier
04ef7381857ac5e8cca24ec397b2f05476647fec
2022-07-07T05:12:58.000Z
[ "pytorch", "tensorboard", "deberta-v2", "transformers", "text-classification", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
domenicrosati
null
domenicrosati/deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier
10
null
transformers
11,991
--- license: mit tags: - text-classification - generated_from_trainer metrics: - accuracy - f1 - recall - precision model-index: - name: deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier This model is a fine-tuned version of [microsoft/deberta-v3-xsmall](https://huggingface.co/microsoft/deberta-v3-xsmall) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0979 - Accuracy: 0.9682 - F1: 0.8332 - Recall: 0.8466 - Precision: 0.8202 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4.5e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.1539 | 1.0 | 6667 | 0.1237 | 0.9584 | 0.7668 | 0.7307 | 0.8067 | | 0.1271 | 2.0 | 13334 | 0.0979 | 0.9682 | 0.8332 | 0.8466 | 0.8202 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
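The card reports evaluation metrics only; a minimal inference sketch, shown with a batch of inputs since pipelines accept lists as well as single strings (both example sentences are invented):

```python
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="domenicrosati/deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier",
)

texts = [
    "This article systematically reviews transformer-based summarization methods.",
    "We report a new experimental result on protein folding dynamics.",
]
for prediction in clf(texts):
    print(prediction)
```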
f00d/distilroberta-base-finetuned-wikitext2
a2a9b78dd3a8b524b930dcd71cea17db50863084
2022-07-06T10:02:54.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
f00d
null
f00d/distilroberta-base-finetuned-wikitext2
10
null
transformers
11,992
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilroberta-base-finetuned-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilroberta-base-finetuned-wikitext2 This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8343 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.0842 | 1.0 | 2406 | 1.9219 | | 1.9913 | 2.0 | 4812 | 1.8822 | | 1.9596 | 3.0 | 7218 | 1.8215 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
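As a fill-mask checkpoint, the model can be queried directly with the pipeline API; a minimal sketch (RoBERTa-style models use `<mask>` as the mask token, and the example sentence is arbitrary):

```python
from transformers import pipeline

fill = pipeline("fill-mask", model="f00d/distilroberta-base-finetuned-wikitext2")

# Each candidate is a dict with the filled sequence, token string, and score.
for candidate in fill("The capital of France is <mask>."):
    print(candidate["token_str"], round(candidate["score"], 3))
```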
mbshr/urt5-base-finetuned
c19132e2e1d37ab0307220f560cdba3388e64188
2022-07-06T19:49:23.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
mbshr
null
mbshr/urt5-base-finetuned
10
null
transformers
11,993
Entry not found
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-512-5-30
74d53f6fecfa1c948d71c0723233fea48b3bc113
2022-07-07T14:20:09.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-512-5-30
10
null
transformers
11,994
Entry not found
dminiotas05/distilbert-base-uncased-finetuned-ft500_6class600
836385f37f9ec92cb23d0452601bf675b9ecca79
2022-07-07T13:23:59.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
dminiotas05
null
dminiotas05/distilbert-base-uncased-finetuned-ft500_6class600
10
null
transformers
11,995
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-ft500_6class600 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ft500_6class600 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.6317 - Accuracy: 0.35 - F1: 0.3327 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 1.5717 | 1.0 | 188 | 1.5375 | 0.3067 | 0.2820 | | 1.4338 | 2.0 | 376 | 1.5354 | 0.3207 | 0.2824 | | 1.3516 | 3.0 | 564 | 1.4852 | 0.3573 | 0.3287 | | 1.2722 | 4.0 | 752 | 1.4997 | 0.366 | 0.3534 | | 1.1923 | 5.0 | 940 | 1.5474 | 0.362 | 0.3454 | | 1.1156 | 6.0 | 1128 | 1.5998 | 0.3547 | 0.3387 | | 1.0522 | 7.0 | 1316 | 1.6154 | 0.3473 | 0.3316 | | 1.0148 | 8.0 | 1504 | 1.6317 | 0.35 | 0.3327 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
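The card names six classes but not their labels; a sketch that first inspects the id2label mapping stored in the config (the labels may be generic `LABEL_0` through `LABEL_5` unless the author renamed them) and then runs a prediction on a placeholder input:

```python
from transformers import AutoConfig, pipeline

model_id = "dminiotas05/distilbert-base-uncased-finetuned-ft500_6class600"

# Inspect the six-class label mapping before trusting pipeline output.
config = AutoConfig.from_pretrained(model_id)
print(config.id2label)

clf = pipeline("text-classification", model=model_id)
print(clf("placeholder input text"))
```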
akhisreelibra/mt5-small-finetuned-oneindia
8690764e4b598b2aa6d94d4af20b86df043fe06c
2022-07-08T01:05:04.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "summarization", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
summarization
false
akhisreelibra
null
akhisreelibra/mt5-small-finetuned-oneindia
10
null
transformers
11,996
kwmr/wav2vec2_japanese
f7b020bbe975d54cc9e767a2880695121fd82d97
2022-07-07T20:33:05.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
kwmr
null
kwmr/wav2vec2_japanese
10
2
transformers
11,997
## Wav2Vec2.0 XLSR-53 large model fine-tuned for Japanese

A version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) fine-tuned for Japanese.

## Training dataset

- [Common Voice](https://commonvoice.mozilla.org/ja)

## Usage

```python
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from datasets import load_dataset
import torch

# load model and processor
processor = Wav2Vec2Processor.from_pretrained("kwmr/wav2vec2_japanese")
model = Wav2Vec2ForCTC.from_pretrained("kwmr/wav2vec2_japanese")
```
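The snippet above stops after loading the model; a continuation sketch for transcribing a clip, assuming a 16 kHz mono WAV file at a placeholder path and using `librosa` to load it:

```python
import librosa
import torch

# Load a 16 kHz mono waveform; "sample.wav" is a placeholder path.
speech, _ = librosa.load("sample.wav", sr=16000)

inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits

# Greedy CTC decoding back to text.
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```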
dminiotas05/distilbert-base-uncased-finetuned-ft650_6class
1f59f4e2e98fa2960c0291cd7654806a33f72425
2022-07-08T12:11:05.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
dminiotas05
null
dminiotas05/distilbert-base-uncased-finetuned-ft650_6class
10
null
transformers
11,998
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-ft650_6class results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ft650_6class This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4555 - Accuracy: 0.3707 - F1: 0.3625 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 1.5838 | 1.0 | 188 | 1.5235 | 0.3253 | 0.2947 | | 1.4521 | 2.0 | 376 | 1.4744 | 0.3467 | 0.3234 | | 1.3838 | 3.0 | 564 | 1.4565 | 0.358 | 0.3483 | | 1.323 | 4.0 | 752 | 1.4555 | 0.3707 | 0.3625 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
steven123/Check_Missing_Teeth
c185ff48cfeabbe5cd88fb79c509782264c058a2
2022-07-08T22:59:30.000Z
[ "pytorch", "tensorboard", "vit", "image-classification", "transformers", "huggingpics", "model-index" ]
image-classification
false
steven123
null
steven123/Check_Missing_Teeth
10
null
transformers
11,999
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: Check_Missing_Teeth results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9375 --- # Check_Missing_Teeth Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### Missing Teeth ![Missing Teeth](images/Missing_Teeth.jpg) #### Non-Missing Teeth ![Non-Missing Teeth](images/Non-Missing_Teeth.jpg)
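HuggingPics checkpoints are standard ViT classifiers, so they load through the image-classification pipeline; a minimal sketch with a placeholder image path:

```python
from PIL import Image
from transformers import pipeline

clf = pipeline("image-classification", model="steven123/Check_Missing_Teeth")

# "teeth.jpg" is a placeholder; pass any RGB dental image.
image = Image.open("teeth.jpg")
for prediction in clf(image):
    print(prediction["label"], round(prediction["score"], 3))
```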