Dataset preview: column schema (each record below lists these fields one per line, in this order):

| Column | Type | Lengths / values |
| --- | --- | --- |
| modelId | string | lengths 4–112 |
| sha | string | lengths 40–40 |
| lastModified | string | lengths 24–24 |
| tags | sequence | |
| pipeline_tag | string | 29 classes |
| private | bool | 1 class |
| author | string | lengths 2–38 |
| config | null | |
| id | string | lengths 4–112 |
| downloads | float64 | 0–36.8M |
| likes | float64 | 0–712 |
| library_name | string | 17 classes |
| `__index_level_0__` | int64 | 0–38.5k |
| readme | string | lengths 0–186k |
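As a quick sketch of how rows with this schema can be consumed with the `datasets` library (the dataset repository id below is a placeholder, since this preview does not name its source):

```python
from datasets import load_dataset

# Placeholder repo id; substitute the actual dataset this preview comes from.
ds = load_dataset("some-user/hub-model-metadata", split="train")

row = ds[0]
# Fields follow the schema table above.
print(row["modelId"], row["pipeline_tag"], row["library_name"], row["downloads"])
```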
kleinay/qasrl-seq2seq-model
d4ab640472add4bbcc611d7a2e832752591e8ed6
2022-06-03T08:08:43.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
kleinay
null
kleinay/qasrl-seq2seq-model
1
null
transformers
32,600
Entry not found
erickfm/t5-large-finetuned-bias-v6
f400c4876c20fb2a5ad9937705e70fba941f67d4
2022-06-04T08:56:19.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-large-finetuned-bias-v6
1
null
transformers
32,601
Entry not found
Bistolero/en_ge_20_20
58b83b85a25fa8ef43bb80a6423338854b4df8ce
2022-06-03T10:26:42.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Bistolero
null
Bistolero/en_ge_20_20
1
null
transformers
32,602
Entry not found
PontifexMaximus/mt5-small-parsinlu-opus-translation_fa_en-finetuned-fa-to-en
d6755563f63d51b972a45594dd6579de77bc24c1
2022-06-07T15:17:41.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "dataset:opus_infopankki", "transformers", "generated_from_trainer", "license:cc-by-nc-sa-4.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
PontifexMaximus
null
PontifexMaximus/mt5-small-parsinlu-opus-translation_fa_en-finetuned-fa-to-en
1
null
transformers
32,603
---
license: cc-by-nc-sa-4.0
tags:
- generated_from_trainer
datasets:
- opus_infopankki
metrics:
- bleu
model-index:
- name: mt5-small-parsinlu-opus-translation_fa_en-finetuned-fa-to-en
  results:
  - task:
      name: Sequence-to-sequence Language Modeling
      type: text2text-generation
    dataset:
      name: opus_infopankki
      type: opus_infopankki
      args: en-fa
    metrics:
    - name: Bleu
      type: bleu
      value: 15.1329
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mt5-small-parsinlu-opus-translation_fa_en-finetuned-fa-to-en

This model is a fine-tuned version of [persiannlp/mt5-small-parsinlu-opus-translation_fa_en](https://huggingface.co/persiannlp/mt5-small-parsinlu-opus-translation_fa_en) on the opus_infopankki dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9193
- Bleu: 15.1329
- Gen Len: 13.4603

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-06
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|
| 3.1182 | 1.0 | 1807 | 2.5985 | 10.6445 | 13.7938 |
| 2.8377 | 2.0 | 3614 | 2.3799 | 11.852 | 13.6168 |
| 2.6644 | 3.0 | 5421 | 2.2426 | 12.877 | 13.5768 |
| 2.5286 | 4.0 | 7228 | 2.1521 | 13.5342 | 13.5567 |
| 2.4523 | 5.0 | 9035 | 2.0801 | 14.0355 | 13.5387 |
| 2.4026 | 6.0 | 10842 | 2.0197 | 14.4284 | 13.4956 |
| 2.317 | 7.0 | 12649 | 1.9691 | 14.7776 | 13.4325 |
| 2.3174 | 8.0 | 14456 | 1.9373 | 15.189 | 13.4261 |
| 2.3374 | 9.0 | 16263 | 1.9393 | 15.1149 | 13.4087 |
| 2.3131 | 10.0 | 18070 | 1.9304 | 15.0654 | 13.4234 |
| 2.295 | 11.0 | 19877 | 1.9239 | 15.102 | 13.4443 |
| 2.3017 | 12.0 | 21684 | 1.9203 | 15.1676 | 13.4575 |
| 2.3153 | 13.0 | 23491 | 1.9193 | 15.1329 | 13.4603 |
| 2.2939 | 14.0 | 25298 | 1.9193 | 15.1329 | 13.4603 |
| 2.3241 | 15.0 | 27105 | 1.9193 | 15.1329 | 13.4603 |
| 2.3376 | 16.0 | 28912 | 1.9193 | 15.1329 | 13.4603 |
| 2.2859 | 17.0 | 30719 | 1.9193 | 15.1329 | 13.4603 |
| 2.3016 | 18.0 | 32526 | 1.9193 | 15.1329 | 13.4603 |
| 2.3101 | 19.0 | 34333 | 1.9193 | 15.1329 | 13.4603 |
| 2.3088 | 20.0 | 36140 | 1.9193 | 15.1329 | 13.4603 |
| 2.2833 | 21.0 | 37947 | 1.9193 | 15.1329 | 13.4603 |
| 2.2986 | 22.0 | 39754 | 1.9193 | 15.1329 | 13.4603 |
| 2.3254 | 23.0 | 41561 | 1.9193 | 15.1329 | 13.4603 |
| 2.3165 | 24.0 | 43368 | 1.9193 | 15.1329 | 13.4603 |
| 2.289 | 25.0 | 45175 | 1.9193 | 15.1329 | 13.4603 |
| 2.3212 | 26.0 | 46982 | 1.9193 | 15.1329 | 13.4603 |
| 2.2902 | 27.0 | 48789 | 1.9193 | 15.1329 | 13.4603 |
| 2.3026 | 28.0 | 50596 | 1.9193 | 15.1329 | 13.4603 |
| 2.2949 | 29.0 | 52403 | 1.9193 | 15.1329 | 13.4603 |
| 2.3152 | 30.0 | 54210 | 1.9193 | 15.1329 | 13.4603 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.7.1+cu110
- Datasets 2.2.2
- Tokenizers 0.12.1
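As a rough usage sketch (not part of the card), the fine-tuned checkpoint above can be tried with the `transformers` text2text pipeline; the Persian input and generation length are assumptions:

```python
from transformers import pipeline

translator = pipeline(
    "text2text-generation",
    model="PontifexMaximus/mt5-small-parsinlu-opus-translation_fa_en-finetuned-fa-to-en",
)
# Sample Persian input ("Hello, how are you?"); max_length is an assumption.
print(translator("سلام، حال شما چطور است؟", max_length=64)[0]["generated_text"])
```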
arrandi/xlm-roberta-base-finetuned-panx-de
42a34e7f4362918b675e5701e28f59cb8a20931d
2022-06-03T14:27:43.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
arrandi
null
arrandi/xlm-roberta-base-finetuned-panx-de
1
null
transformers
32,604
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: xtreme
      type: xtreme
      args: PAN-X.de
    metrics:
    - name: F1
      type: f1
      value: 0.8620945214069894
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1372
- F1: 0.8621

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2575 | 1.0 | 525 | 0.1621 | 0.8292 |
| 0.1287 | 2.0 | 1050 | 0.1378 | 0.8526 |
| 0.0831 | 3.0 | 1575 | 0.1372 | 0.8621 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
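A minimal usage sketch (not from the card) for this German NER checkpoint via the token-classification pipeline; the sample sentence is made up:

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="arrandi/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",  # merge subword pieces into entity spans
)
# Made-up German sentence with person, organization, and location mentions.
print(ner("Angela Merkel besuchte das Siemens-Werk in München."))
```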
tdobrxl/opus-mt-en-vi-finetuned-IWSLT15
959db39d99018650b78b807a733991ceeed6aa97
2022-06-06T11:03:20.000Z
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
tdobrxl
null
tdobrxl/opus-mt-en-vi-finetuned-IWSLT15
1
null
transformers
32,605
# Overview

This is a fine-tuned version of the model [Helsinki-NLP/opus-mt-en-vi](https://huggingface.co/Helsinki-NLP/opus-mt-en-vi?text=My+name+is+Sarah+and+I+live+in+London) on the [IWSLT'15 English-Vietnamese](https://huggingface.co/datasets/mt_eng_vietnamese) dataset.

Performance in terms of [sacrebleu](https://huggingface.co/docs/datasets/v1.5.0/using_metrics.html) on the test set:
* Original opus-mt-en-vi: 29.83
* Fine-tuned opus-mt-en-vi: 37.35

# Parameters

* learning_rate=2e-5
* batch_size: 32
* weight_decay=0.01
* num_train_epochs=1

# Thoughts

* The `Helsinki-NLP/opus-mt-en-vi` model is small (around 260 MB) and can be deployed cheaply to a server without a GPU (e.g., an EC2 t2.medium instance).
* It is easier and much faster to train than t5 or byt5.
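A toy sketch of the sacrebleu comparison described above, using the `sacrebleu` package directly; the hypothesis and reference strings are placeholders, not the IWSLT'15 test set:

```python
import sacrebleu

# Placeholder hypothesis/reference pair standing in for real test-set outputs.
hyps = ["Tôi tên là Sarah và tôi sống ở London."]
refs = [["Tên tôi là Sarah và tôi sống ở Luân Đôn."]]  # one reference set

print(sacrebleu.corpus_bleu(hyps, refs).score)
```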
jppaolim/v48_GPT2Medium_PT
44ea5e187f0cc74eafaaa206816b5e7bb3466747
2022-06-03T15:07:49.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
jppaolim
null
jppaolim/v48_GPT2Medium_PT
1
null
transformers
32,606
# My Story model

{'top_p': 0.9, 'top_k': 50, 'temperature': 1, 'repetition_penalty': 1}

Arthur goes to the beach. Arthur wants to go to the beach. He calls the beach and asks for a spot on the sand. Arthur gets a new friend with a beach towel. Arthur takes the beach. Arthur spends the day relaxing and having a great time.

Arthur goes to the beach. Arthur is feeling really bored. He decides to go to the beach. He packs up his bags and drives to the beach. Arthur spends the next two days swimming at the beach. He is so glad he got to spend some time at the beach.

Arthur goes to the beach. Arthur really wanted to go to the beach. He went to the beach in his car. He spent the whole day on the beach. He finally had a great day of swimming. Arthur really enjoyed the beach.

Arthur goes to the beach. Arthur had always wanted to go to the beach. Arthur saved up his money for a few weeks. Arthur went to the beach with his savings. Arthur had a great time at the beach. Arthur is now planning his next trip.

Arthur goes to the beach. Arthur loves to go to the beach. He loves to go to the sand. Arthur took his friend with him to the beach. Arthur played in the ocean for a long time. Arthur got his sand and went home.

{'top_p': 0.9, 'top_k': 50, 'temperature': 1, 'repetition_penalty': 1.05}

Arthur goes to the beach. Arthur is excited to go to the beach. Arthur packs his beach towel. Arthur drives to the beach. Arthur spends the entire day at the beach. Arthur has a great day at the beach.

Arthur goes to the beach. Arthur was a lonely boy who wanted a place to stay. His parents wanted him to go to the beach. They convinced Arthur to go to the beach with them. Arthur was so happy to go to the beach. He was so happy to get to play in the ocean with his friends.

Arthur goes to the beach. Arthur decided he needed to go to the beach. He called his friends to come and see the beach. They met up at the beach. Arthur and his friends went to the beach and played. Arthur went home and had a good day.

Arthur goes to the beach. Arthur is sitting at home reading a book. He decides he will play a game of basketball. Arthur decides to play a game of basketball. He plays his game with his family and friends. Arthur is very happy that he played basketball.

Arthur goes to the beach. Arthur and his friends went to the beach. Arthur found out that he had a bad sunburn. Arthur had to go to the doctor for his sunburn. The doctor recommended an ointment to Arthur. Arthur had no more bad sunburns after that.

{'top_p': 0.9, 'top_k': 40, 'temperature': 0.8, 'repetition_penalty': 1.1}

Arthur goes to the beach. Arthur is bored one afternoon. He calls his friend Frank and tells him to go to the beach. Arthur drives to the beach and spends the day playing in the sand. After the sun went down, Arthur went home and watched a movie. Now, Arthur is very tired after a long day of work.

Arthur goes to the beach. Arthur is bored one summer day. He asks his parents for some time off. His parents tell him he has to go the beach. Arthur packs up his car and takes a trip. When he gets back home, Arthur is happy that he went the beach.

Arthur goes to the beach. Arthur had always wanted to go to the beach. Arthur's friends encourage him to go. Finally Arthur agrees to go to the beach. At the beach he spends a very relaxing day at the beach. Arthur is glad that he went to the beach.

Arthur goes to the beach. Arthur wants to go to the beach. He gets his stuff together and drives to the beach. While on the beach he meets a nice young man named Dave. Dave and Arthur fall in love. Arthur and Dave become friends and start dating.

Arthur goes to the beach. Arthur is bored on a weekend afternoon. He decides to go to the beach. Arthur packs his beach bag and leaves. Arthur arrives at the beach. Arthur spends the day at the beach.

{'top_p': 0.9, 'top_k': 40, 'temperature': 0.6, 'repetition_penalty': 1.15}

Arthur goes to the beach. Arthur has always wanted to go to the beach. He decides to go to the beach one day. At the beach he spends the entire day at the beach. He has a great time. Arthur is very happy that he went to the beach.

Arthur goes to the beach. Arthur is bored one day. He decides to go to the beach. When he gets there, he spends all day at the beach. Arthur has a great time at the beach. He will not leave the beach for another day.

Arthur goes to the beach. Arthur is bored on a summer day. He decides he wants to go to the beach. Arthur packs his bags and drives out to the ocean. Once at the beach, Arthur spends all day playing in the sand. Now that he has had so much fun, he plans to do it again soon.

Arthur goes to the beach. Arthur was feeling bored one day. He decided to go to the beach. Arthur went to the beach and played in the sand. Arthur felt so much better after playing in the sand. Arthur was glad he had gone to the beach.

Arthur goes to the beach. Arthur was excited for a day at the beach. He had packed his beach bag and his sunscreen. Arthur went to the beach with his friends. Arthur played in the sand all day. Arthur returned home with lots of sunburns.

{'top_p': 0.9, 'top_k': 40, 'temperature': 0.4, 'repetition_penalty': 1.2}

Arthur goes to the beach. Arthur is bored one day. He decides he would like a day off. Arthur calls his friends and tells them about the day. They all go to the beach and play games. Arthur has a great day at the beach.

Arthur goes to the beach. Arthur is bored one day. He decides he needs a way to pass the time. Arthur takes his beach bag and drives to the beach. Arthur spends the entire day at the beach. Arthur has a great day at the beach.

Arthur goes to the beach. Arthur is bored one day. He decides he would like a day off. So Arthur takes a trip to the beach. Arthur spends the entire day at the beach. Arthur has a great day at the beach.

Arthur goes to the beach. Arthur is bored one day. He decides to go to the beach. Arthur spends the entire day at the beach. When he gets home, Arthur feels happy. Now that he has gone to the beach, Arthur is no longer bored.

Arthur goes to the beach. Arthur is feeling bored one day. He decides he would like to go to the beach. Arthur packs up his beach bag and drives down to the beach. While at the beach, Arthur sees many people playing in the water. Arthur has a great time at the beach with his friends.
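The sampling configurations listed in this card map directly onto `transformers` generation arguments. A sketch reproducing the first configuration (the prompt and token budget are assumptions):

```python
from transformers import pipeline

generator = pipeline("text-generation", model="jppaolim/v48_GPT2Medium_PT")
# Settings from the card's first block; prompt and max_new_tokens are assumptions.
out = generator(
    "Arthur goes to the beach.",
    do_sample=True,
    top_p=0.9,
    top_k=50,
    temperature=1.0,
    repetition_penalty=1.0,
    max_new_tokens=80,
)
print(out[0]["generated_text"])
```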
roshnir/xlmr-base-ft-mlqa-dev-en-hi
36103165676c1cc6e265207325fe9c89f7bc466a
2022-06-03T15:42:02.000Z
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/xlmr-base-ft-mlqa-dev-en-hi
1
null
transformers
32,607
Entry not found
Splend1dchan/xtreme_s_xlsr_t5lephone-small_residual_minds14.en-all
2941f9c7f685a453b5d567ce3e7c859fad6d4b29
2022-06-04T05:15:02.000Z
[ "pytorch", "tensorboard", "wav2vec2", "transformers" ]
null
false
Splend1dchan
null
Splend1dchan/xtreme_s_xlsr_t5lephone-small_residual_minds14.en-all
1
null
transformers
32,608
Entry not found
sayanmandal/t5-small_6_3-hinglish
74bfad6171723766d3604ac2140631302e8996e9
2022-06-04T02:31:25.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
sayanmandal
null
sayanmandal/t5-small_6_3-hinglish
1
null
transformers
32,609
Entry not found
roshnir/mBert-finetuned-mlqa-dev-vi-hi
6d16746ab215e23861d3230fc977ce1401e1aada
2022-06-03T19:48:02.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/mBert-finetuned-mlqa-dev-vi-hi
1
null
transformers
32,610
Entry not found
santiviquez/mt5-small-finetuned-samsum-en
481c2e3f6d61aae6f8f5ee235040ba18f85a52fd
2022-06-07T14:59:42.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "summarization", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
summarization
false
santiviquez
null
santiviquez/mt5-small-finetuned-samsum-en
1
null
transformers
32,611
---
license: apache-2.0
tags:
- summarization
- generated_from_trainer
metrics:
- rouge
model-index:
- name: mt5-small-finetuned-samsum-en
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mt5-small-finetuned-samsum-en

This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.4304
- Rouge1: 21.9966
- Rouge2: 9.1451
- Rougel: 19.532
- Rougelsum: 20.6359

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5.6e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|
| No log | 1.0 | 125 | 4.0396 | 8.9392 | 1.5339 | 8.1146 | 8.538 |
| No log | 2.0 | 250 | 3.0166 | 17.0822 | 6.0564 | 15.1854 | 16.2353 |
| No log | 3.0 | 375 | 2.7375 | 18.9169 | 7.0912 | 16.8087 | 17.7473 |
| No log | 4.0 | 500 | 2.5996 | 20.5929 | 7.8755 | 18.2074 | 19.3914 |
| No log | 5.0 | 625 | 2.5095 | 21.1958 | 8.7027 | 18.8919 | 19.9921 |
| No log | 6.0 | 750 | 2.4641 | 21.2479 | 8.8452 | 18.9289 | 19.9557 |
| No log | 7.0 | 875 | 2.4341 | 22.1418 | 9.1294 | 19.6073 | 20.7666 |
| No log | 8.0 | 1000 | 2.4304 | 21.9966 | 9.1451 | 19.532 | 20.6359 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
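A usage sketch (not part of the card) for this dialogue summarizer; the dialogue and length limits below are invented:

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="santiviquez/mt5-small-finetuned-samsum-en")
dialogue = "Anna: Are we still on for lunch? Ben: Yes, 12:30 at the usual place."
# Length limits are assumptions for a short chat summary.
print(summarizer(dialogue, max_length=32, min_length=5)[0]["summary_text"])
```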
jgriffi/xlm-roberta-base-finetuned-panx-de-fr
8bbdf7857a80b2c2fcc242ba11b93ad35de1cfd5
2022-06-03T23:42:57.000Z
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
jgriffi
null
jgriffi/xlm-roberta-base-finetuned-panx-de-fr
1
null
transformers
32,612
---
license: mit
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de-fr
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de-fr

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1774
- F1: 0.8594

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 12
- eval_batch_size: 12
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.3029 | 1.0 | 1430 | 0.1884 | 0.8237 |
| 0.1573 | 2.0 | 2860 | 0.1770 | 0.8473 |
| 0.0959 | 3.0 | 4290 | 0.1774 | 0.8594 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
jgriffi/xlm-roberta-base-finetuned-panx-fr
77efa863f71b3be7628bb3a4bc667b460afd6cca
2022-06-04T00:16:41.000Z
[ "pytorch", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
jgriffi
null
jgriffi/xlm-roberta-base-finetuned-panx-fr
1
null
transformers
32,613
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-fr
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: xtreme
      type: xtreme
      args: PAN-X.fr
    metrics:
    - name: F1
      type: f1
      value: 0.9320766980825479
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-fr

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0994
- F1: 0.9321

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 12
- eval_batch_size: 12
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.5314 | 1.0 | 382 | 0.2522 | 0.8277 |
| 0.2555 | 2.0 | 764 | 0.1414 | 0.9059 |
| 0.1667 | 3.0 | 1146 | 0.0994 | 0.9321 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
jgriffi/xlm-roberta-base-finetuned-panx-it
825bfb8f0a1e25e937a2a796c440ba49127eb7c3
2022-06-04T00:32:43.000Z
[ "pytorch", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
jgriffi
null
jgriffi/xlm-roberta-base-finetuned-panx-it
1
null
transformers
32,614
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-it
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: xtreme
      type: xtreme
      args: PAN-X.it
    metrics:
    - name: F1
      type: f1
      value: 0.8374017376913528
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-it

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2556
- F1: 0.8374

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 12
- eval_batch_size: 12
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.6559 | 1.0 | 140 | 0.2821 | 0.7862 |
| 0.251 | 2.0 | 280 | 0.2658 | 0.8179 |
| 0.1457 | 3.0 | 420 | 0.2556 | 0.8374 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
jgriffi/xlm-roberta-base-finetuned-panx-en
647ca5231ab9968d0bfa37eb973342dc89d6b8e3
2022-06-04T00:48:44.000Z
[ "pytorch", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
jgriffi
null
jgriffi/xlm-roberta-base-finetuned-panx-en
1
null
transformers
32,615
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-en
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: xtreme
      type: xtreme
      args: PAN-X.en
    metrics:
    - name: F1
      type: f1
      value: 0.7054833239118146
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-en

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4218
- F1: 0.7055

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 12
- eval_batch_size: 12
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.9596 | 1.0 | 99 | 0.5244 | 0.5827 |
| 0.4403 | 2.0 | 198 | 0.4184 | 0.6764 |
| 0.3253 | 3.0 | 297 | 0.4218 | 0.7055 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
jgriffi/xlm-roberta-base-finetuned-panx-all
5271ebb81eac0ce125100d750299dffb00a7077d
2022-06-04T01:24:48.000Z
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
jgriffi
null
jgriffi/xlm-roberta-base-finetuned-panx-all
1
null
transformers
32,616
---
license: mit
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-all
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-all

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1448
- F1: 0.8881

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 12
- eval_batch_size: 12
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.3029 | 1.0 | 1669 | 0.2075 | 0.7971 |
| 0.164 | 2.0 | 3338 | 0.1612 | 0.8680 |
| 0.1025 | 3.0 | 5007 | 0.1448 | 0.8881 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.11.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
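All five jgriffi cards above list the same hyperparameters. A rough reconstruction in `TrainingArguments` (the output directory and anything else not listed in the cards are assumptions; the Adam betas/epsilon shown in the cards are the library defaults):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="xlm-roberta-base-finetuned-panx-all",  # assumed name
    learning_rate=5e-5,
    per_device_train_batch_size=12,
    per_device_eval_batch_size=12,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3,
    # Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the default optimizer.
)
```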
huggingtweets/katieoneuro
f6db0e917dcd77ecf62a9394402d9419735d5872
2022-06-04T01:31:59.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/katieoneuro
1
null
transformers
32,617
---
language: en
thumbnail: http://www.huggingtweets.com/katieoneuro/1654306303616/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---

🤖 AI BOT 🤖: Katie O'Nell 🧠💻 (@katieoneuro)

I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).

Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!

## How does it work?

The model uses the following pipeline.

![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).

## Training data

The model was trained on tweets from Katie O'Nell 🧠💻.

| Data | Katie O'Nell 🧠💻 |
| --- | --- |
| Tweets downloaded | 552 |
| Retweets | 323 |
| Short tweets | 17 |
| Tweets kept | 212 |

[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2umesznv/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.

## Training procedure

The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @katieoneuro's tweets.

Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2evfy6ho) for full transparency and reproducibility.

At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2evfy6ho/artifacts) is logged and versioned.

## How to use

You can use this model directly with a pipeline for text generation:

```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/katieoneuro')
generator("My dream is", num_return_sequences=5)
```

## Limitations and bias

The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).

In addition, the data present in the user's tweets further affects the text generated by the model.

## About

*Built by Boris Dayma*

[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)

For more details, visit the project repository.

[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
LinaR/Prediccion_titulos
46e265fe6670c40d699583831d2a6a7bffafcf56
2022-06-04T04:44:50.000Z
[ "pytorch", "tf", "t5", "text2text-generation", "transformers", "generated_from_keras_callback", "model-index", "autotrain_compatible" ]
text2text-generation
false
LinaR
null
LinaR/Prediccion_titulos
1
null
transformers
32,618
---
tags:
- generated_from_keras_callback
model-index:
- name: Prediccion_titulos
  results: []
---

<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. -->

# Prediccion_titulos

This model predicts news headlines.

## Model description

This model was trained with a T5 transformer and a Spanish-language dataset.

## Intended uses & limitations

More information needed

## Training and evaluation data

The data was taken from the following Kaggle dataset: https://www.kaggle.com/datasets/josemamuiz/noticias-laraznpblico, a collection scraped from the websites of Spanish newspapers.

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- optimizer: None
- training_precision: float32

### Training results

### Framework versions

- Transformers 4.19.2
- TensorFlow 2.8.2
- Datasets 2.2.2
- Tokenizers 0.12.1
newlife/AlQgen
39f65aa7a9a7a676b9ae57db867e330ce3625454
2022-06-04T09:10:52.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
newlife
null
newlife/AlQgen
1
null
transformers
32,619
Entry not found
newlife/openq-generator
1305312190cc20b9f33f17e7d8d3bed9a11ebb4e
2022-06-04T11:10:12.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
newlife
null
newlife/openq-generator
1
null
transformers
32,620
Entry not found
roshnir/xlmr-finetuned-mlqa-dev-hi
b56c72d891c8c5de3bc39a9035b5da72631f4ff5
2022-06-04T09:29:18.000Z
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/xlmr-finetuned-mlqa-dev-hi
1
null
transformers
32,621
Entry not found
aggtamv/wav2vec_2.0_extra_vocab
d4c03727153155e59cb8f2191a2dc875192aa4a4
2022-06-08T08:56:07.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
aggtamv
null
aggtamv/wav2vec_2.0_extra_vocab
1
null
transformers
32,622
Entry not found
cutten/wav2vec2-base-timit-demo-google-colab
6b78313144330c8eca5df59dee12e76f859d9a4e
2022-06-07T03:35:57.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
cutten
null
cutten/wav2vec2-base-timit-demo-google-colab
1
null
transformers
32,623
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-base-timit-demo-google-colab
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-base-timit-demo-google-colab

This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6342
- Wer: 0.5808

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 9.1358 | 1.19 | 500 | 3.2710 | 1.0 |
| 3.0499 | 2.38 | 1000 | 1.8976 | 1.0 |
| 1.279 | 3.56 | 1500 | 0.7502 | 0.8228 |
| 0.7953 | 4.75 | 2000 | 0.5914 | 0.7343 |
| 0.6451 | 5.94 | 2500 | 0.6152 | 0.7280 |
| 0.5351 | 7.13 | 3000 | 0.5948 | 0.7041 |
| 0.4633 | 8.31 | 3500 | 0.5585 | 0.6712 |
| 0.4272 | 9.5 | 4000 | 0.5372 | 0.6457 |
| 0.3803 | 10.69 | 4500 | 0.5404 | 0.6402 |
| 0.3462 | 11.88 | 5000 | 0.5862 | 0.6484 |
| 0.3302 | 13.06 | 5500 | 0.5991 | 0.6426 |
| 0.3096 | 14.25 | 6000 | 0.5687 | 0.6287 |
| 0.2839 | 15.44 | 6500 | 0.5798 | 0.6384 |
| 0.2701 | 16.63 | 7000 | 0.5775 | 0.6047 |
| 0.2507 | 17.81 | 7500 | 0.5638 | 0.6065 |
| 0.2376 | 19.0 | 8000 | 0.5937 | 0.6094 |
| 0.2264 | 20.19 | 8500 | 0.5944 | 0.6065 |
| 0.2146 | 21.38 | 9000 | 0.6050 | 0.6122 |
| 0.1947 | 22.57 | 9500 | 0.6283 | 0.5992 |
| 0.1982 | 23.75 | 10000 | 0.6126 | 0.6018 |
| 0.1924 | 24.94 | 10500 | 0.6075 | 0.5962 |
| 0.1855 | 26.13 | 11000 | 0.6344 | 0.5938 |
| 0.1839 | 27.32 | 11500 | 0.6118 | 0.5880 |
| 0.1741 | 28.5 | 12000 | 0.6381 | 0.5878 |
| 0.1726 | 29.69 | 12500 | 0.6342 | 0.5808 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0
- Datasets 2.2.2
- Tokenizers 0.12.1
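A minimal transcription sketch (not from the card) for this checkpoint via the ASR pipeline; the audio path is a placeholder:

```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="cutten/wav2vec2-base-timit-demo-google-colab",
)
# "sample.wav" is a placeholder path; audio should be 16 kHz mono.
print(asr("sample.wav")["text"])
```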
SaiNikhileshReddy/xlm-roberta-large-finetuned-ner
b25d20bdf5c28aec5b271d20ff80cae6bd0df1a9
2022-06-04T18:26:17.000Z
[ "pytorch", "xlm-roberta", "token-classification", "dataset:hi_ner_config", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
SaiNikhileshReddy
null
SaiNikhileshReddy/xlm-roberta-large-finetuned-ner
1
null
transformers
32,624
---
license: mit
tags:
- generated_from_trainer
datasets:
- hi_ner_config
model-index:
- name: xlm-roberta-large-finetuned-ner
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-large-finetuned-ner

This model is a fine-tuned version of [xlm-roberta-large](https://huggingface.co/xlm-roberta-large) on the hi_ner_config dataset.
It achieves the following results on the evaluation set:
- eval_loss: 0.2329
- eval_precision: 0.7110
- eval_recall: 0.6854
- eval_f1: 0.6980
- eval_accuracy: 0.9332
- eval_runtime: 162.3478
- eval_samples_per_second: 66.9
- eval_steps_per_second: 16.73
- epoch: 2.64
- step: 50198

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 6

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
huggingtweets/orc_nft
980231871024b21e0b8849ac2c4947b830200b0e
2022-06-04T16:13:13.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/orc_nft
1
null
transformers
32,625
---
language: en
thumbnail: http://www.huggingtweets.com/orc_nft/1654359188989/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---

🤖 AI BOT 🤖: ORC.A ⍬ (@orc_nft)

I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).

Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!

## How does it work?

The model uses the following pipeline.

![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).

## Training data

The model was trained on tweets from ORC.A ⍬.

| Data | ORC.A ⍬ |
| --- | --- |
| Tweets downloaded | 1675 |
| Retweets | 113 |
| Short tweets | 544 |
| Tweets kept | 1018 |

[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/wwc37qkh/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.

## Training procedure

The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @orc_nft's tweets.

Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/debtzj0e) for full transparency and reproducibility.

At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/debtzj0e/artifacts) is logged and versioned.

## How to use

You can use this model directly with a pipeline for text generation:

```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/orc_nft')
generator("My dream is", num_return_sequences=5)
```

## Limitations and bias

The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).

In addition, the data present in the user's tweets further affects the text generated by the model.

## About

*Built by Boris Dayma*

[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)

For more details, visit the project repository.

[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
grimar/tp_nlp_Roberta_1E
9f12fdec291ee41b6982879f1dec3c3e9eed90a9
2022-06-04T18:09:33.000Z
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
false
grimar
null
grimar/tp_nlp_Roberta_1E
1
null
transformers
32,626
Entry not found
atoivat/distilbert-base-uncased-finetuned-squad
3eba1a8799d9e852d50693977d4225648f6bd1f5
2022-06-04T21:13:36.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
atoivat
null
atoivat/distilbert-base-uncased-finetuned-squad
1
null
transformers
32,627
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-squad

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1504

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.2086 | 1.0 | 5533 | 1.1565 |
| 0.9515 | 2.0 | 11066 | 1.1225 |
| 0.7478 | 3.0 | 16599 | 1.1504 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
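A usage sketch (not part of the card) for this SQuAD-style extractive QA checkpoint; the question and context below are made up:

```python
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="atoivat/distilbert-base-uncased-finetuned-squad",
)
result = qa(
    question="Where do water droplets collide with ice crystals?",
    context="Water droplets collide with ice crystals within a cloud to form precipitation.",
)
print(result["answer"], result["score"])
```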
huggingtweets/centraldamiku
cddf9545758013175974dea577a522969735af9e
2022-06-04T18:14:42.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/centraldamiku
1
null
transformers
32,628
---
language: en
thumbnail: http://www.huggingtweets.com/centraldamiku/1654366478559/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---

🤖 AI BOT 🤖: Central da Miku (@centraldamiku)

I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).

Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!

## How does it work?

The model uses the following pipeline.

![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).

## Training data

The model was trained on tweets from Central da Miku.

| Data | Central da Miku |
| --- | --- |
| Tweets downloaded | 3242 |
| Retweets | 348 |
| Short tweets | 801 |
| Tweets kept | 2093 |

[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/m8jk5mo9/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.

## Training procedure

The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @centraldamiku's tweets.

Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/rp6i3tpo) for full transparency and reproducibility.

At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/rp6i3tpo/artifacts) is logged and versioned.

## How to use

You can use this model directly with a pipeline for text generation:

```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/centraldamiku')
generator("My dream is", num_return_sequences=5)
```

## Limitations and bias

The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).

In addition, the data present in the user's tweets further affects the text generated by the model.

## About

*Built by Boris Dayma*

[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)

For more details, visit the project repository.

[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
NadiaSan/udesa-model-aah-es
a5c0771e02cb291a48c56a61b958237c9d4e1a10
2022-06-04T21:05:42.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
NadiaSan
null
NadiaSan/udesa-model-aah-es
1
null
transformers
32,629
Entry not found
huggingtweets/tomcooper26-tomncooper
f7007f3313ffe97d52aac3c1691c1e457d4e9434
2022-06-04T21:53:08.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/tomcooper26-tomncooper
1
null
transformers
32,630
---
language: en
thumbnail: http://www.huggingtweets.com/tomcooper26-tomncooper/1654379583668/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---

🤖 AI CYBORG 🤖: Tom Cooper & Tom Cooper (@tomcooper26-tomncooper)

I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).

Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!

## How does it work?

The model uses the following pipeline.

![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).

## Training data

The model was trained on tweets from Tom Cooper & Tom Cooper.

| Data | Tom Cooper | Tom Cooper |
| --- | --- | --- |
| Tweets downloaded | 2092 | 3084 |
| Retweets | 179 | 687 |
| Short tweets | 223 | 59 |
| Tweets kept | 1690 | 2338 |

[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/dndifpco/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.

## Training procedure

The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @tomcooper26-tomncooper's tweets.

Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/97vltow9) for full transparency and reproducibility.

At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/97vltow9/artifacts) is logged and versioned.

## How to use

You can use this model directly with a pipeline for text generation:

```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/tomcooper26-tomncooper')
generator("My dream is", num_return_sequences=5)
```

## Limitations and bias

The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).

In addition, the data present in the user's tweets further affects the text generated by the model.

## About

*Built by Boris Dayma*

[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)

For more details, visit the project repository.

[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
huggingtweets/thundering165
8e8d54b688cb66234b9dccd675ec60d59cb8d204
2022-06-05T00:16:55.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/thundering165
1
null
transformers
32,631
---
language: en
thumbnail: http://www.huggingtweets.com/thundering165/1654388210270/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---

🤖 AI BOT 🤖: Paul Harvey (@thundering165)

I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).

Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!

## How does it work?

The model uses the following pipeline.

![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).

## Training data

The model was trained on tweets from Paul Harvey.

| Data | Paul Harvey |
| --- | --- |
| Tweets downloaded | 3248 |
| Retweets | 381 |
| Short tweets | 120 |
| Tweets kept | 2747 |

[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/3uv1udbr/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.

## Training procedure

The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @thundering165's tweets.

Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3hlf7pk2) for full transparency and reproducibility.

At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3hlf7pk2/artifacts) is logged and versioned.

## How to use

You can use this model directly with a pipeline for text generation:

```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/thundering165')
generator("My dream is", num_return_sequences=5)
```

## Limitations and bias

The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).

In addition, the data present in the user's tweets further affects the text generated by the model.

## About

*Built by Boris Dayma*

[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)

For more details, visit the project repository.

[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
SmartPy/mt5-small-finetuned-amazon-en-es
8c5c5483ff95868225f2c7438f356975d58acd80
2022-06-05T13:45:57.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
SmartPy
null
SmartPy/mt5-small-finetuned-amazon-en-es
1
null
transformers
32,632
Entry not found
huggingtweets/cboldisor
48d69630aa613907ad0ab1c3f8247bc42bf3cb8b
2022-06-05T08:48:23.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/cboldisor
1
null
transformers
32,633
---
language: en
thumbnail: http://www.huggingtweets.com/cboldisor/1654418897981/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---

🤖 AI BOT 🤖: Costin Boldisor (@cboldisor)

I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).

Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!

## How does it work?

The model uses the following pipeline.

![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).

## Training data

The model was trained on tweets from Costin Boldisor.

| Data | Costin Boldisor |
| --- | --- |
| Tweets downloaded | 153 |
| Retweets | 1 |
| Short tweets | 8 |
| Tweets kept | 144 |

[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/15uralj1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.

## Training procedure

The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @cboldisor's tweets.

Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1kbdt4l8) for full transparency and reproducibility.

At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1kbdt4l8/artifacts) is logged and versioned.

## How to use

You can use this model directly with a pipeline for text generation:

```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/cboldisor')
generator("My dream is", num_return_sequences=5)
```

## Limitations and bias

The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).

In addition, the data present in the user's tweets further affects the text generated by the model.

## About

*Built by Boris Dayma*

[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)

For more details, visit the project repository.

[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
nestoralvaro/mT5_multilingual_XLSum-finetuned-xsum-mlsum
6f003dea0eb36f15b376ac492f4535e9faca60c1
2022-06-05T16:47:06.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "dataset:mlsum", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
nestoralvaro
null
nestoralvaro/mT5_multilingual_XLSum-finetuned-xsum-mlsum
1
null
transformers
32,634
--- tags: - generated_from_trainer datasets: - mlsum metrics: - rouge model-index: - name: mT5_multilingual_XLSum-finetuned-xsum-mlsum results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: mlsum type: mlsum args: es metrics: - name: Rouge1 type: rouge value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mT5_multilingual_XLSum-finetuned-xsum-mlsum This model is a fine-tuned version of [csebuetnlp/mT5_multilingual_XLSum](https://huggingface.co/csebuetnlp/mT5_multilingual_XLSum) on the mlsum dataset. It achieves the following results on the evaluation set: - Loss: nan - Rouge1: 0.0 - Rouge2: 0.0 - Rougel: 0.0 - Rougelsum: 0.0 - Gen Len: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 0.0 | 1.0 | 66592 | nan | 0.0 | 0.0 | 0.0 | 0.0 | 1.0 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
TheoMrc/b3_ache_tests
99236f21d3d3b1b08998705818f21bf47558bc54
2022-06-05T23:43:46.000Z
[ "pytorch", "segformer", "transformers" ]
null
false
TheoMrc
null
TheoMrc/b3_ache_tests
1
null
transformers
32,635
Entry not found
jppaolim/v52_Large
a54a0fc9356cd17a6d65d1f48e9d525a690d7d8a
2022-06-05T11:04:46.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
jppaolim
null
jppaolim/v52_Large
1
null
transformers
32,636
# My Story model {'top_p': 0.9, 'top_k': 50, 'temperature': 1, 'repetition_penalty': 1} Arthur goes to the beach. Arthur was driving home from work. He had to stop at a gas station. He bought a lottery ticket and bought some tickets. He bought some lottery tickets and played them all. He got really lucky and was won the jack jack jack jack jack prize. Arthur goes to the beach. Arthur decides to go to the beach. First he has to sit on the sand. Next, he would walk the beach. Finally, he can go to the park. Once they get to the beach, he'll go swimming. Arthur goes to the beach. Arthur goes to the beach. He is bored while watching the sand. He takes a nap. Finally he goes to the water. He gets a nap and heads home. Arthur goes to the beach. Arthur decides to go to the beach. He begins to put on his shoes and walk to the beach. Finally he comes home to find his dad with him. He is happy he got to see his dad and his dad were together. He decides to go home to rest. Arthur goes to the beach. Arthur went to the beach to play volleyball. He was excited to be there playing. After playing, his foot broke his hip. His dad had to take him to the hospital. Luckily, the injury was minimal and he went back to playing. {'top_p': 0.9, 'top_k': 50, 'temperature': 1, 'repetition_penalty': 1.05} Arthur goes to the beach. Arthur decides to go to the beach. He begins his day by going to the beach with his dad. At the beach, he played for his friends and watched a movie. Afterwards, they went to a local bar after the movie. They had a good time. Arthur goes to the beach. Arthur decided to go to the beach with his friends. They had a fun day there and played volleyball all day. He had a good time playing in the beach. His friend ended up taking him out for dinner that night. He had a nice meal with friends. Arthur goes to the beach. Arthur was going to the beach on Friday. He decided to take his son to the beach. His son spent the weekend playing. In the afternoon his mom went to the mall to see the ocean. She loved watching the ocean. Arthur goes to the beach. A friend invited me to go to the beach. I agreed and put on my favorite outfit. It took me forever to find my bathing suit, and it was a lot difficult. The ride to the beach was worth a great day! Arthur goes to the beach. Arthur decided that he wanted to go to the beach. He went to the beach. After a few hours, he left the beach and went to the water. While swimming he found his mother asleep and was able to see the world. When he woke up, he felt very happy. {'top_p': 0.9, 'top_k': 40, 'temperature': 0.8, 'repetition_penalty': 1.1} Arthur goes to the beach. Arthur was on vacation in Hawaii. He decided to go to the beach. He rented a car and drove to the beach. He went for an hour relaxing in the water. After the hours, he went home. Arthur goes to the beach. Arthur wanted to go to the beach with his friends. They drove to the beach and spent the day relaxing and relaxing. When they got home, they decided to play volleyball. Afterwards, they all played volleyball and had a great time. After that, they had a nice dinner and had a wonderful time. Arthur goes to the beach. Arthur is going to the beach with his family. He decides to take his family and his family to the beach. While there, they watch the ocean and relax. Afterwards, they spent the day playing volleyball. After the sun was over, they headed home. Arthur goes to the beach. Arthur was going to go to the beach with his friend. They decided to take a road trip to Las Vegas. Once they arrived, the friends began to spend a day relaxing and playing. After a few hours of relaxing, he went home after playing video games. When he got home, his friend let him stay out on the beach. Arthur goes to the beach. Arthur wanted to go to the beach with his friends. He invited some friends. The friends played volleyball and the football. They had fun. At the end of the day, they all left to go home. {'top_p': 0.9, 'top_k': 40, 'temperature': 0.6, 'repetition_penalty': 1.15} Arthur goes to the beach. Arthur went to the beach with his family. He played volleyball and had fun playing. The other kids decided to play volleyball. They made fun of him for not being able to play. His dad was happy that he got a chance to go to the beach. Arthur goes to the beach. Arthur is going to the beach with his dad. His father takes him to a different park. He spends hours playing volleyball. After that, he heads home and relax on his couch. When he gets home, his mom tells him he can't play. Arthur goes to the beach. Arthur is out on a boat with his friends. He decides to go to the beach. While there, he sees a bunch of people. He spends time talking and playing volleyball. He has fun at the beach all day. Arthur goes to the beach. Arthur was going to go to the beach with his friends. They wanted to spend time together. He decided to take them to the beach for a swim. When they arrived, he had a blast relaxing on the sand. His friends swam and played until he went home. Arthur goes to the beach. Arthur had never been to the beach before. He decided to go to the beach with his friends. When they got there, he was amazed by all of the beauty and sea life. He decided that going to the beach would be the most fun he had! After a few hours of fun, he decided to go home. {'top_p': 0.9, 'top_k': 40, 'temperature': 0.4, 'repetition_penalty': 1.2} Arthur goes to the beach. Arthur was invited to go to the beach with his friends. He decided that he would like to swim. He spent a few hours swimming and playing volleyball. When he got home, he realized that he forgot his sunscreen. He went back to the beach without any more sunblock. Arthur goes to the beach. Arthur is going to the beach with his friends. He wants to go swimming and play volleyball. His friends decide to take him to a park. The boys spend the day playing volleyball and playing volleyball. After the game, he decides to stay home from school. Arthur goes to the beach. Arthur was invited to go to the beach with his friends. He wanted to spend time with them and play volleyball. His friends told him that it would be a good idea to go. The day of the game, Arthur decided to go to the beach. After playing for his friends, he went home. Arthur goes to the beach. Arthur is going to go to the beach with his family. He wants to spend time playing volleyball but he doesn't have enough money. He decides to get a job and earn money by working hard at his local mall. He begins to work as a mechanic and gets paid for it. He goes home and plays volleyball every day. Arthur goes to the beach. Arthur was invited to go to the beach with his friends. He decided to go by himself. When he got there, everyone said that he was too hot. The weather was so cold that he had to leave. After that day, Arthur went home and watched tv instead of going.
Rgl73/xlm-roberta-base-finetuned-panx-de
296915dada7c8c36c28103494726500ccb26d1fe
2022-06-05T15:51:50.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
Rgl73
null
Rgl73/xlm-roberta-base-finetuned-panx-de
1
null
transformers
32,637
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8608532209375177 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1446 - F1: 0.8609 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2623 | 1.0 | 787 | 0.1756 | 0.8132 | | 0.1321 | 2.0 | 1574 | 0.1497 | 0.8458 | | 0.0856 | 3.0 | 2361 | 0.1446 | 0.8609 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0 - Datasets 1.16.1 - Tokenizers 0.10.3
roshnir/xlmr-finetuned-mlqa-dev-ar-hi
3ff2346662bf566b97b83a10f6b63c8268a578ed
2022-06-05T12:12:20.000Z
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/xlmr-finetuned-mlqa-dev-ar-hi
1
null
transformers
32,638
Entry not found
EmileEsmaili/gpt2_pitchfork
923d3bc9179e51925b4d6eb9f327d280de4e10c0
2022-06-05T16:57:37.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
EmileEsmaili
null
EmileEsmaili/gpt2_pitchfork
1
null
transformers
32,639
Entry not found
haritzpuerto/TinyBERT_General_4L_312D-squad
9a2840bf7ac60a2aeb2ff51493b41c55e7b024c9
2022-06-05T13:16:50.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
question-answering
false
haritzpuerto
null
haritzpuerto/TinyBERT_General_4L_312D-squad
1
null
transformers
32,640
--- tags: - generated_from_trainer datasets: - squad model-index: - name: TinyBERT_General_4L_312D-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # TinyBERT_General_4L_312D-squad This model is a fine-tuned version of [huawei-noah/TinyBERT_General_4L_312D](https://huggingface.co/huawei-noah/TinyBERT_General_4L_312D) on the squad dataset. It achieves the following results on the evaluation set: - exact_match: 33.301797540208135 - f1: 45.03886349847048 - Loss: 2.5477 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 20 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7498 | 1.0 | 4380 | 2.5477 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
asahi417/lmqg-mt5-small-dequad
f90d0a007c9012751be0f9213729423d22f2da42
2022-06-09T10:55:29.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
asahi417
null
asahi417/lmqg-mt5-small-dequad
1
null
transformers
32,641
Entry not found
roshnir/xlmr-finetuned-mlqa-dev-zh-hi
9af30f232dce1f7e803780791b92a80ca0f12213
2022-06-05T13:18:56.000Z
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/xlmr-finetuned-mlqa-dev-zh-hi
1
null
transformers
32,642
Entry not found
jppaolim/v53_Large_AdaMW
ee3c43251d9522d9833f3b6db8b85979645a681b
2022-06-05T14:20:25.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
jppaolim
null
jppaolim/v53_Large_AdaMW
1
null
transformers
32,643
# My Story model {'top_p': 0.9, 'top_k': 50, 'temperature': 1, 'repetition_penalty': 1} Arthur goes to the beach. Arthur was driving his dog on the highway. He felt a breeze approaching him. He took his dog out and let her out. The dog jumped and got lost. Arthur found her under a tree for her. Arthur goes to the beach. Arthur decides to go to the beach. First he has to go to the sand. Then he has to walk the beach. Finally he will swim in the ocean. Finally he will swim in the ocean. Arthur goes to the beach. Arthur is the type of person. He wanted to be the one who didn't want to be alone. He joined the swim team in high school. They invited him to swim in a bikini race. He didn't feel comfortable that he was alone. Arthur goes to the beach. Arthur is going to the beach. He heads to the ocean. When he gets to the beach he realizes he forgot his sunscreen. He gets in big trouble for being late. The police write him up. Arthur goes to the beach. Arthur was so sad. The weather was so cold and cold. He took his friends over to watch a movie. After the movie they played volleyball. Arthur was happy that his friends were there to play. {'top_p': 0.9, 'top_k': 50, 'temperature': 1, 'repetition_penalty': 1.05} Arthur goes to the beach. Arthur had always wanted to see the beach. He took his girlfriend out and drove to the beach. He parked his car in the parking lot. Arthur looked outside and saw that the beach was not looking too well. After a couple of hours the car had rolled into a hole. Arthur goes to the beach. Arthur was going to the beach to relax on the beach. He packed his towel and his sunblock. After the sunbathes, Arthur got a tan. He felt much better afterwards. He spent the next day tanning every day with his friends. Arthur goes to the beach. Arthur was about to leave for work. It was a cloudy day outside, but he decided to go into town. He drove home and watched the ocean. Suddenly, a big wave wave hit his car in the road. He didn't get out until days later. Arthur goes to the beach. Arthur is an old man who always wants to go to a beach. He takes some of his friends and family go. He gets his family to drive to the beach. They drive to the beach. He goes to the beach. Arthur goes to the beach. Arthur went to the beach with his friend. They decided to go into a surf park. Once at the beach he had fun swimming. He decided to spend the rest of the day surfing and surfing. His day was wonderful. {'top_p': 0.9, 'top_k': 40, 'temperature': 0.8, 'repetition_penalty': 1.1} Arthur goes to the beach. Arthur is a very lonely man. He decides he wants to meet someone who is nice to him. He finds a man with a beautiful young woman. The two men fall in love and start to date. They realize they are best friends. Arthur goes to the beach. Arthur always wanted to see the ocean. His dad told him that it was too hot. One day Arthur asked his friend Jack if he could take her. Jack agreed to come with him and they drove to the beach. They had fun relaxing in the ocean. Arthur goes to the beach. Arthur is going to go swimming. He has never been to a beach before. Arthur decides to take his friends out to the beach. He takes his friend out to the beach and spends time on the water. Arthur feels glad he went to the beach. Arthur goes to the beach. Arthur is going to the beach. He decides he should go to the beach with his friends. He arrives at the beach with his friends. They spend the day playing volleyball. The kids all go home happy. Arthur goes to the beach. Arthur is very adventurous. He decides to go to the beach. He heads to the beach with his friends. He spends two days relaxing on the beach. He is happy that he got to spend time relaxing on the beach. {'top_p': 0.9, 'top_k': 40, 'temperature': 0.6, 'repetition_penalty': 1.15} Arthur goes to the beach. Arthur is on vacation in Hawaii. He decides to go to the beach with his friends. At the beach he meets a nice girl named Courtney. The two of them meet up for a date and get married. Now they are both happy that he went to the beach. Arthur goes to the beach. Arthur is a very adventurous person. He wants to go to the beach with his friends. They decide to go to the beach to relax. Then, they spend the day relaxing on the beach. Finally, he is able to relax on the beach. Arthur goes to the beach. Arthur was very excited for a relaxing day at the beach. He took his family and friends out on the water. They swam and swam all day long. It was a relaxing day that made him happy. The beach was so relaxing he couldn't be happier. Arthur goes to the beach. Arthur was going to the beach with his family. He had never seen a water park before. They arrived at the beach and started to explore. It was a peaceful day. Afterwards, he decided to go swimming with his friends. Arthur goes to the beach. Arthur was going to go to the beach. He had never seen one before. He decided to drive to the beach. He drove for a couple of hours. Finally he arrived at the beach. {'top_p': 0.9, 'top_k': 40, 'temperature': 0.4, 'repetition_penalty': 1.2} Arthur goes to the beach. Arthur is a very adventurous person. He decides that he needs to get out of his house and go for it. He gets out on the beach with friends. They spend hours swimming in the ocean. He feels happy that he went outside. Arthur goes to the beach. Arthur is going to the beach with his friends. He's going to be in the water for a couple of minutes. When he gets out, it begins to rainy and thunderstorms. The rain starts to pour down on him. He will not be able to swim until after that. Arthur goes to the beach. Arthur is going to go to the beach with his friends. He gets a flat tire on the road. His friend comes over and takes him out. The car begins to drive. They all have fun at the beach. Arthur goes to the beach. Arthur is a very adventurous man who loves swimming in the ocean. He decides that he needs to swim in the ocean at least once per week. At first, he finds it difficult to swim because of his weight. Finally he begins to enjoy the water and relax. After a few weeks, Arthur feels happy that he finally gets to swim. Arthur goes to the beach. Arthur is going to go to the beach with his friends. They decide to play volleyball. He gets a lot of sunblock on his legs and body. The other team wins by a large margin. It's fun playing basketball in the ocean.
meetyildiz/M-TurQA-bert-base-turkish-uncased-finetuned-toqad
47907aa4b8266ab96fa64903631cac770d721d24
2022-06-05T13:43:58.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
meetyildiz
null
meetyildiz/M-TurQA-bert-base-turkish-uncased-finetuned-toqad
1
null
transformers
32,644
Entry not found
meetyildiz/M-TurQA-convbert-base-turkish-cased-finetuned-toqad
2c7a4ceef042031bbd35289d33fae935b53670d0
2022-06-05T13:52:37.000Z
[ "pytorch", "convbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
meetyildiz
null
meetyildiz/M-TurQA-convbert-base-turkish-cased-finetuned-toqad
1
null
transformers
32,645
Entry not found
meetyildiz/M-TurQA-distilbert-base-turkish-cased-finetuned-toqad
c41820cb057176cead26ccd88d8df5d64f53ea8f
2022-06-05T14:00:58.000Z
[ "pytorch", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
meetyildiz
null
meetyildiz/M-TurQA-distilbert-base-turkish-cased-finetuned-toqad
1
null
transformers
32,646
Entry not found
meetyildiz/M-TurQA-bert-base-turkish-cased-finetuned-toqad-aug
784292e471a9caddcf193f32754c8744008ccf6b
2022-06-05T14:51:38.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
meetyildiz
null
meetyildiz/M-TurQA-bert-base-turkish-cased-finetuned-toqad-aug
1
null
transformers
32,647
Entry not found
erfangc/mt5-small-finetuned-amazon-en-es
6a464d94333b537d1a12af0f3197e6c094db9c80
2022-06-05T16:32:46.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erfangc
null
erfangc/mt5-small-finetuned-amazon-en-es
1
null
transformers
32,648
Entry not found
meetyildiz/M-TurQA-xlm-roberta-base-finetuned-toqad-aug
38925d041915ac208c259a3243089e337ddfaa8f
2022-06-05T15:09:49.000Z
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
meetyildiz
null
meetyildiz/M-TurQA-xlm-roberta-base-finetuned-toqad-aug
1
null
transformers
32,649
Entry not found
meetyildiz/M-TurQA-bert-base-turkish-128k-cased-finetuned-toqad-aug
e85b4cc47f0ce95ef2e2936f76e273c2cfedb4ff
2022-06-05T15:31:23.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
meetyildiz
null
meetyildiz/M-TurQA-bert-base-turkish-128k-cased-finetuned-toqad-aug
1
null
transformers
32,650
Entry not found
Bistolero/nl_GA_32b
59faff389c1bc228c07cb295f61019193f84390b
2022-06-05T16:43:13.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Bistolero
null
Bistolero/nl_GA_32b
1
null
transformers
32,651
Entry not found
AllenGeng/OCamlBert
acec8b008f7533942b8fcd145e47c95fb7953e42
2022-06-06T18:28:40.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
false
AllenGeng
null
AllenGeng/OCamlBert
1
null
transformers
32,652
--- license: mit ---
daianadte/roberta-NLI-DMPV
9c9265f2008a29e14c965e93fd457954a4880663
2022-06-05T21:40:10.000Z
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
false
daianadte
null
daianadte/roberta-NLI-DMPV
1
null
transformers
32,653
Entry not found
asahi417/lmqg-mt5-small-itquad
48748708e370e34cdb11bb07fbe003c7f0f37997
2022-06-09T10:59:30.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
asahi417
null
asahi417/lmqg-mt5-small-itquad
1
null
transformers
32,654
Entry not found
erfangc/mt5-small-sandbox1
4d7d6e36e31ef6e42f6d9982261d64925624a59f
2022-06-06T03:10:37.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "summarization", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
summarization
false
erfangc
null
erfangc/mt5-small-sandbox1
1
null
transformers
32,655
--- license: apache-2.0 tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-sandbox1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-sandbox1 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 14.5875 - Rouge1: 0.0 - Rouge2: 0.0 - Rougel: 0.0 - Rougelsum: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results ### Framework versions - Transformers 4.15.0 - Pytorch 1.11.0 - Datasets 2.2.2 - Tokenizers 0.10.3
joshanashakya/old_mini_codebert_sourcecode_nmt_pn2ja_200E_5e-05LR
318160abee826808b3eea0b21add261cbd6351f9
2022-06-06T04:34:26.000Z
[ "pytorch", "encoder-decoder", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
joshanashakya
null
joshanashakya/old_mini_codebert_sourcecode_nmt_pn2ja_200E_5e-05LR
1
null
transformers
32,656
Entry not found
eunjin/kobart_jeju_translator
e4b8349584f1031ffcc3d0fc12c5e6c223706e48
2022-06-06T13:44:49.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
eunjin
null
eunjin/kobart_jeju_translator
1
null
transformers
32,657
Korean Dialect Translator: Standard > Jeju - Used Data: AI Hub 한국어 방언 발화(제주도) (Korean dialect speech, Jeju Island) - Used Model: SKT-KoBART - https://github.com/SKT-AI/KoBART - Reference Code: https://github.com/seujung/KoBART-translation
eunjin/kobart_jeju_to_standard_translator
e2c24189d441f0ac939ad2ffc0d0df4163914830
2022-06-06T13:44:31.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
eunjin
null
eunjin/kobart_jeju_to_standard_translator
1
null
transformers
32,658
Korean Dialect Translator: Jeju > Standard - Used Data: AI Hub 한국어 방언 발화(제주도) (Korean dialect speech, Jeju Island) - Used Model: SKT-KoBART - https://github.com/SKT-AI/KoBART - Reference Code: https://github.com/seujung/KoBART-translation
eunjin/kobart_gyeongsang_to_standard_translator
c211f446be3858027978791dd70e0fe23caad1b9
2022-06-06T13:44:06.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
eunjin
null
eunjin/kobart_gyeongsang_to_standard_translator
1
null
transformers
32,659
Korean Dialect Translator: Gyeongsang > Standard - Used Data: AI Hub 한국어 방언 발화(경상도) (Korean dialect speech, Gyeongsang Province) - Used Model: SKT-KoBART - https://github.com/SKT-AI/KoBART - Reference Code: https://github.com/seujung/KoBART-translation
joshanashakya/old_codebert_sourcecode_nmt_ja2pn_50E_5e-05LR
eaf1851c514a5b799ba077ebd1015cda08ffab0d
2022-06-06T07:07:17.000Z
[ "pytorch", "encoder-decoder", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
joshanashakya
null
joshanashakya/old_codebert_sourcecode_nmt_ja2pn_50E_5e-05LR
1
null
transformers
32,660
Entry not found
botika/distilbert-base-uncased-finetuned-squad
8c11a47cf871eeee5184e189f2ec150ad8250f30
2022-06-07T06:36:08.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
botika
null
botika/distilbert-base-uncased-finetuned-squad
1
null
transformers
32,661
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.1500 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.3149 | 1.0 | 2767 | 1.2079 | | 1.053 | 2.0 | 5534 | 1.1408 | | 0.8809 | 3.0 | 8301 | 1.1500 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu102 - Datasets 2.2.2 - Tokenizers 0.12.1
VRT/mT5_summarization
bcc20058413439d84bc8d3e59c4deb30517831c2
2022-06-06T10:17:46.000Z
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
VRT
null
VRT/mT5_summarization
1
null
transformers
32,662
Entry not found
eunbeee/ainize-kobart-news-eb-finetuned-xsum
cd0990000c901d7f9e68edd0b459b95b8cf852a7
2022-06-08T08:34:26.000Z
[ "pytorch", "tensorboard", "bart", "text2text-generation", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
eunbeee
null
eunbeee/ainize-kobart-news-eb-finetuned-xsum
1
null
transformers
32,663
--- license: mit tags: - generated_from_trainer metrics: - rouge model-index: - name: ainize-kobart-news-eb-finetuned-xsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ainize-kobart-news-eb-finetuned-xsum This model is a fine-tuned version of [ainize/kobart-news](https://huggingface.co/ainize/kobart-news) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2147 - Rouge1: 60.732 - Rouge2: 39.1933 - Rougel: 60.6507 - Rougelsum: 60.6712 - Gen Len: 19.3417 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.0649 | 1.0 | 749 | 0.5502 | 56.6571 | 36.5992 | 56.6185 | 56.6364 | 19.2929 | | 0.7103 | 2.0 | 1498 | 0.3904 | 59.1212 | 38.3611 | 59.093 | 59.1191 | 19.31 | | 0.4723 | 3.0 | 2247 | 0.2922 | 60.1133 | 38.7819 | 60.0439 | 60.0572 | 19.2659 | | 0.3841 | 4.0 | 2996 | 0.2367 | 60.4405 | 39.0176 | 60.366 | 60.4057 | 19.3397 | | 0.3091 | 5.0 | 3745 | 0.2147 | 60.732 | 39.1933 | 60.6507 | 60.6712 | 19.3417 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
spy24/autotrain-expand-928531583
05bed7971d189e0c0293a8db47cfce52ac1f6902
2022-06-06T16:04:02.000Z
[ "pytorch", "pegasus", "text2text-generation", "en", "dataset:spy24/autotrain-data-expand", "transformers", "autotrain", "co2_eq_emissions", "autotrain_compatible" ]
text2text-generation
false
spy24
null
spy24/autotrain-expand-928531583
1
1
transformers
32,664
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - spy24/autotrain-data-expand co2_eq_emissions: 3.4552892403407167 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 928531583 - CO2 Emissions (in grams): 3.4552892403407167 ## Validation Metrics - Loss: 2.1122372150421143 - Rouge1: 68.7226 - Rouge2: 50.1638 - RougeL: 59.7235 - RougeLsum: 62.3458 - Gen Len: 63.2505 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/spy24/autotrain-expand-928531583 ```
jontooy/AraBERT256-Flickr8k
ca2a9af0752584e24e2d4aca3ce6d8891117aa82
2022-06-06T12:21:07.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "license:afl-3.0", "autotrain_compatible" ]
fill-mask
false
jontooy
null
jontooy/AraBERT256-Flickr8k
1
null
transformers
32,665
--- license: afl-3.0 ---
jontooy/GigaBERT32-COCO
30d1a9754ea211b973145879367bceb631452633
2022-06-06T12:25:18.000Z
[ "pytorch", "bert", "feature-extraction", "transformers", "license:afl-3.0" ]
feature-extraction
false
jontooy
null
jontooy/GigaBERT32-COCO
1
null
transformers
32,666
--- license: afl-3.0 ---
VRT/mT5_initial
a1b3fe9bfbce847415d99d5a6140369e629cdd74
2022-06-06T14:31:41.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
VRT
null
VRT/mT5_initial
1
null
transformers
32,667
Entry not found
stopdoingmath/opus-mt-sla-en-finetuned-uk-to-en
96fddf47fc22e7ee19b9b2deda6f9c6e9ccdd2bc
2022-06-06T17:20:17.000Z
[ "pytorch", "tensorboard", "marian", "text2text-generation", "dataset:opus100", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
stopdoingmath
null
stopdoingmath/opus-mt-sla-en-finetuned-uk-to-en
1
null
transformers
32,668
--- license: apache-2.0 tags: - generated_from_trainer datasets: - opus100 metrics: - bleu model-index: - name: opus-mt-sla-en-finetuned-uk-to-en results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: opus100 type: opus100 args: default metrics: - name: Bleu type: bleu value: 27.7684 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-sla-en-finetuned-uk-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-sla-en](https://huggingface.co/Helsinki-NLP/opus-mt-sla-en) on the opus100 dataset. It achieves the following results on the evaluation set: - Loss: 1.7232 - Bleu: 27.7684 - Gen Len: 12.2485 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 1.5284 | 1.0 | 62500 | 1.7232 | 27.7684 | 12.2485 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
huggingtweets/russellriesjr
e9623f17e00fddbb9b2228dbde1f5e7d89846582
2022-06-06T18:54:21.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/russellriesjr
1
null
transformers
32,669
--- language: en thumbnail: http://www.huggingtweets.com/russellriesjr/1654541578565/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1468670117357789192/sStrLB1i_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Russell Ries Jr.</div> <div style="text-align: center; font-size: 14px;">@russellriesjr</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Russell Ries Jr.. | Data | Russell Ries Jr. | | --- | --- | | Tweets downloaded | 3236 | | Retweets | 2163 | | Short tweets | 135 | | Tweets kept | 938 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1hn2gsci/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @russellriesjr's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/378xjgs6) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/378xjgs6/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/russellriesjr') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
erickfm/t5-small-finetuned-bias-sweep
8a9440892900fd941788a399a34f3cb50bb745ee
2022-06-07T03:47:56.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep
1
null
transformers
32,670
Entry not found
asahi417/lmqg-mt5-small-ruquad
572fbfbfdc9a3dce565f1ab441538802d55ac7a8
2022-06-09T10:51:26.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
asahi417
null
asahi417/lmqg-mt5-small-ruquad
1
null
transformers
32,671
Entry not found
huggingtweets/hopedavistweets
f0e47b6abbd2a7770e0996308bc86dca2ac0e4a0
2022-06-07T00:48:38.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/hopedavistweets
1
null
transformers
32,672
--- language: en thumbnail: http://www.huggingtweets.com/hopedavistweets/1654562883505/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1420954294082326529/ZkxWu0ln_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Hope Davis 🪩</div> <div style="text-align: center; font-size: 14px;">@hopedavistweets</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Hope Davis 🪩. | Data | Hope Davis 🪩 | | --- | --- | | Tweets downloaded | 2707 | | Retweets | 1812 | | Short tweets | 100 | | Tweets kept | 795 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2pkx13m4/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @hopedavistweets's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/objxokv4) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/objxokv4/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/hopedavistweets') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
huggingtweets/sofiaazeman
7e84bdc63861225acadad7085974b9efb04e4d34
2022-06-07T00:53:43.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/sofiaazeman
1
null
transformers
32,673
--- language: en thumbnail: http://www.huggingtweets.com/sofiaazeman/1654563180290/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1511483454495637510/BWEFnW4O_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Sofi Zeman</div> <div style="text-align: center; font-size: 14px;">@sofiaazeman</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Sofi Zeman. | Data | Sofi Zeman | | --- | --- | | Tweets downloaded | 317 | | Retweets | 158 | | Short tweets | 26 | | Tweets kept | 133 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/3uxm4ug9/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @sofiaazeman's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/6819mjpo) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/6819mjpo/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/sofiaazeman') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
huggingtweets/sophiadonis10
c3427af7857c496940f60b69cf5d7cdc4f9691ff
2022-06-07T01:01:18.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/sophiadonis10
1
null
transformers
32,674
--- language: en thumbnail: http://www.huggingtweets.com/sophiadonis10/1654563613795/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1475251222802309123/0V1B7h3p_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Sophia Donis</div> <div style="text-align: center; font-size: 14px;">@sophiadonis10</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Sophia Donis. | Data | Sophia Donis | | --- | --- | | Tweets downloaded | 320 | | Retweets | 113 | | Short tweets | 5 | | Tweets kept | 202 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/4gt337he/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @sophiadonis10's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2u0jynrk) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2u0jynrk/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/sophiadonis10') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Nithiwat/wangchanberta-base-att-spm-uncased-finetuned-imdb
856e6975414b14881020ae575a71e868da2385f1
2022-06-07T01:25:53.000Z
[ "pytorch", "tensorboard", "camembert", "fill-mask", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
fill-mask
false
Nithiwat
null
Nithiwat/wangchanberta-base-att-spm-uncased-finetuned-imdb
1
null
transformers
32,675
--- tags: - generated_from_trainer model-index: - name: wangchanberta-base-att-spm-uncased-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wangchanberta-base-att-spm-uncased-finetuned-imdb This model is a fine-tuned version of [airesearch/wangchanberta-base-att-spm-uncased](https://huggingface.co/airesearch/wangchanberta-base-att-spm-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.5910 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.9341 | 1.0 | 295 | 2.6511 | | 2.8093 | 2.0 | 590 | 2.6178 | | 2.7689 | 3.0 | 885 | 2.5321 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
dmpv/siamesa-concat-dmpv
ac222fa460890da6e82be20a8a92074001b2c896
2022-06-07T01:47:45.000Z
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
false
dmpv
null
dmpv/siamesa-concat-dmpv
1
null
transformers
32,676
Entry not found
erickfm/t5-base-finetuned-bias-v7
a2b396607dcb8ed40b1c52c673ccbd252006b1c2
2022-06-07T02:56:47.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-base-finetuned-bias-v7
1
null
transformers
32,677
Entry not found
SmartPy/fine-tuned-t5-small-accelerate
378102d795864e0ae310d17c909c78aeb43a69ca
2022-06-07T06:40:32.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
SmartPy
null
SmartPy/fine-tuned-t5-small-accelerate
1
1
transformers
32,678
Entry not found
nestoralvaro/mt5-base-finetuned-xsum-mlsum___topic_text_google_mt5_base
d718b324c78f0d0b729687262e8fe9c784235cfd
2022-06-07T09:56:14.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "dataset:mlsum", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
nestoralvaro
null
nestoralvaro/mt5-base-finetuned-xsum-mlsum___topic_text_google_mt5_base
1
null
transformers
32,679
--- license: apache-2.0 tags: - generated_from_trainer datasets: - mlsum metrics: - rouge model-index: - name: mt5-base-finetuned-xsum-mlsum___topic_text_google_mt5_base results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: mlsum type: mlsum args: es metrics: - name: Rouge1 type: rouge value: 0.1582 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-base-finetuned-xsum-mlsum___topic_text_google_mt5_base This model is a fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) on the mlsum dataset. It achieves the following results on the evaluation set: - Loss: nan - Rouge1: 0.1582 - Rouge2: 0.0133 - Rougel: 0.1585 - Rougelsum: 0.1586 - Gen Len: 10.2326 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 0.0 | 1.0 | 66592 | nan | 0.1582 | 0.0133 | 0.1585 | 0.1586 | 10.2326 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
Keerthana/wav2vec2-large-xls-r-300m-ta-colab
508b62985394394910b452c0256e6123b980c634
2022-06-09T13:39:44.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
Keerthana
null
Keerthana/wav2vec2-large-xls-r-300m-ta-colab
1
null
transformers
32,680
Entry not found
anjankumar/mbart-large-50-finetuned-en-to-te
508cfb05aece7d4db2f3801a7d1f91026a491089
2022-06-19T16:32:07.000Z
[ "pytorch", "tensorboard", "mbart", "text2text-generation", "dataset:kde4", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
anjankumar
null
anjankumar/mbart-large-50-finetuned-en-to-te
1
null
transformers
32,681
---
tags:
- generated_from_trainer
datasets:
- kde4
metrics:
- bleu
model-index:
- name: mbart-large-50-finetuned-en-to-te
  results:
  - task:
      name: Sequence-to-sequence Language Modeling
      type: text2text-generation
    dataset:
      name: kde4
      type: kde4
      args: en-te
    metrics:
    - name: Bleu
      type: bleu
      value: 0.7152
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mbart-large-50-finetuned-en-to-te

This model is a fine-tuned version of [facebook/mbart-large-50](https://huggingface.co/facebook/mbart-large-50) on the kde4 dataset.
It achieves the following results on the evaluation set:
- Loss: 13.8521
- Bleu: 0.7152
- Gen Len: 20.5

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| No log | 1.0 | 7 | 13.8521 | 0.7152 | 20.5 |

### Framework versions

- Transformers 4.20.0
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
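A minimal translation sketch (untested; it assumes the fine-tune kept mBART-50's language-code conventions, i.e. en_XX as source and te_IN as target, which the card does not state):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "anjankumar/mbart-large-50-finetuned-en-to-te"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# mBART-50 conditions generation on language codes.
tokenizer.src_lang = "en_XX"
inputs = tokenizer("Open the file manager.", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.lang_code_to_id["te_IN"],
    max_new_tokens=64,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```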
twieland/VN_ja-en_helsinki
c21b586f12c85f8df24be87a29749e7d5a81ee75
2022-06-07T08:55:20.000Z
[ "pytorch", "marian", "text2text-generation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
twieland
null
twieland/VN_ja-en_helsinki
1
null
transformers
32,682
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: VN_ja-en_helsinki
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# VN_ja-en_helsinki

This model is a fine-tuned version of [Helsinki-NLP/opus-mt-ja-en](https://huggingface.co/Helsinki-NLP/opus-mt-ja-en) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.2409
- BLEU: 15.28

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.6165 | 0.19 | 2000 | 2.6734 |
| 2.3805 | 0.39 | 4000 | 2.6047 |
| 2.2793 | 0.58 | 6000 | 2.5461 |
| 2.2028 | 0.78 | 8000 | 2.5127 |
| 2.1361 | 0.97 | 10000 | 2.4511 |
| 1.9653 | 1.17 | 12000 | 2.4331 |
| 1.934 | 1.36 | 14000 | 2.3840 |
| 1.9002 | 1.56 | 16000 | 2.3901 |
| 1.87 | 1.75 | 18000 | 2.3508 |
| 1.8408 | 1.95 | 20000 | 2.3082 |
| 1.6937 | 2.14 | 22000 | 2.3279 |
| 1.6371 | 2.34 | 24000 | 2.3052 |
| 1.6264 | 2.53 | 26000 | 2.3071 |
| 1.6029 | 2.72 | 28000 | 2.2685 |
| 1.5847 | 2.92 | 30000 | 2.2409 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
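A minimal usage sketch (untested; it assumes the checkpoint loads with the standard Marian classes, as the "marian" tag suggests):

```python
from transformers import MarianMTModel, MarianTokenizer

model_id = "twieland/VN_ja-en_helsinki"
tokenizer = MarianTokenizer.from_pretrained(model_id)
model = MarianMTModel.from_pretrained(model_id)

# Translate one Japanese line to English with beam search.
batch = tokenizer(["猫が好きです。"], return_tensors="pt", padding=True)
generated = model.generate(**batch, num_beams=4, max_new_tokens=64)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```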
erickfm/t5-small-finetuned-bias-sweep-2302e2a8
cd378db3a779c4b2217ddcbafcc7f04f236dc44e
2022-06-07T07:41:23.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-2302e2a8
1
null
transformers
32,683
Entry not found
erickfm/t5-small-finetuned-bias-sweep-6ca1c8f4
eee00ba998620a06648f6d406a0943640e955c41
2022-06-07T08:43:12.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-6ca1c8f4
1
null
transformers
32,684
Entry not found
twieland/LN_ja-en_helsinki
1d1c529468ea6a9c60ed95912aa279481e24998c
2022-06-07T22:34:00.000Z
[ "pytorch", "marian", "text2text-generation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
twieland
null
twieland/LN_ja-en_helsinki
1
null
transformers
32,685
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: LN_ja-en_helsinki
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# LN_ja-en_helsinki

This model is a fine-tuned version of [Helsinki-NLP/opus-mt-ja-en](https://huggingface.co/Helsinki-NLP/opus-mt-ja-en) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.5382

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:------:|:---------------:|
| 2.5108 | 0.02 | 2000 | 2.8405 |
| 2.2729 | 0.04 | 4000 | 2.7758 |
| 2.1673 | 0.06 | 6000 | 2.7098 |
| 2.0981 | 0.08 | 8000 | 2.6487 |
| 2.048 | 0.1 | 10000 | 2.7008 |
| 2.0077 | 0.12 | 12000 | 2.6614 |
| 1.9677 | 0.13 | 14000 | 2.6227 |
| 1.9445 | 0.15 | 16000 | 2.5895 |
| 1.9137 | 0.17 | 18000 | 2.5897 |
| 1.8911 | 0.19 | 20000 | 2.6771 |
| 1.8695 | 0.21 | 22000 | 2.6332 |
| 1.8479 | 0.23 | 24000 | 2.6130 |
| 1.8378 | 0.25 | 26000 | 2.6518 |
| 1.8191 | 0.27 | 28000 | 2.6401 |
| 1.8024 | 0.29 | 30000 | 2.6617 |
| 1.7933 | 0.31 | 32000 | 2.6705 |
| 1.7787 | 0.33 | 34000 | 2.6280 |
| 1.7661 | 0.35 | 36000 | 2.6911 |
| 1.7514 | 0.36 | 38000 | 2.6766 |
| 1.7444 | 0.38 | 40000 | 2.6996 |
| 1.7363 | 0.4 | 42000 | 2.6276 |
| 1.722 | 0.42 | 44000 | 2.6466 |
| 1.7177 | 0.44 | 46000 | 2.5937 |
| 1.7055 | 0.46 | 48000 | 2.6386 |
| 1.6956 | 0.48 | 50000 | 2.6794 |
| 1.6885 | 0.5 | 52000 | 2.7336 |
| 1.679 | 0.52 | 54000 | 2.7266 |
| 1.6715 | 0.54 | 56000 | 2.6945 |
| 1.6666 | 0.56 | 58000 | 2.7111 |
| 1.6599 | 0.58 | 60000 | 2.7205 |
| 1.6566 | 0.59 | 62000 | 2.7194 |
| 1.6481 | 0.61 | 64000 | 2.6582 |
| 1.6434 | 0.63 | 66000 | 2.6859 |
| 1.6315 | 0.65 | 68000 | 2.7058 |
| 1.6258 | 0.67 | 70000 | 2.7428 |
| 1.6189 | 0.69 | 72000 | 2.7411 |
| 1.6169 | 0.71 | 74000 | 2.7039 |
| 1.6087 | 0.73 | 76000 | 2.6844 |
| 1.6021 | 0.75 | 78000 | 2.6454 |
| 1.6034 | 0.77 | 80000 | 2.6596 |
| 1.5941 | 0.79 | 82000 | 2.6903 |
| 1.5862 | 0.81 | 84000 | 2.7099 |
| 1.5836 | 0.83 | 86000 | 2.6929 |
| 1.5827 | 0.84 | 88000 | 2.7181 |
| 1.5747 | 0.86 | 90000 | 2.6888 |
| 1.5678 | 0.88 | 92000 | 2.6662 |
| 1.5643 | 0.9 | 94000 | 2.6663 |
| 1.561 | 0.92 | 96000 | 2.6699 |
| 1.5565 | 0.94 | 98000 | 2.6667 |
| 1.5501 | 0.96 | 100000 | 2.6828 |
| 1.5476 | 0.98 | 102000 | 2.6531 |
| 1.5444 | 1.0 | 104000 | 2.6799 |
| 1.5057 | 1.02 | 106000 | 2.6525 |
| 1.5003 | 1.04 | 108000 | 2.6996 |
| 1.4996 | 1.06 | 110000 | 2.6649 |
| 1.4996 | 1.07 | 112000 | 2.6974 |
| 1.4966 | 1.09 | 114000 | 2.7594 |
| 1.4967 | 1.11 | 116000 | 2.6966 |
| 1.492 | 1.13 | 118000 | 2.6929 |
| 1.4923 | 1.15 | 120000 | 2.6522 |
| 1.4838 | 1.17 | 122000 | 2.6363 |
| 1.4839 | 1.19 | 124000 | 2.6849 |
| 1.4807 | 1.21 | 126000 | 2.6667 |
| 1.4778 | 1.23 | 128000 | 2.6684 |
| 1.4731 | 1.25 | 130000 | 2.6338 |
| 1.4727 | 1.27 | 132000 | 2.6093 |
| 1.4695 | 1.29 | 134000 | 2.6020 |
| 1.4656 | 1.3 | 136000 | 2.6341 |
| 1.4648 | 1.32 | 138000 | 2.6509 |
| 1.4578 | 1.34 | 140000 | 2.6807 |
| 1.4606 | 1.36 | 142000 | 2.6357 |
| 1.4529 | 1.38 | 144000 | 2.6404 |
| 1.4488 | 1.4 | 146000 | 2.6347 |
| 1.4442 | 1.42 | 148000 | 2.6058 |
| 1.4447 | 1.44 | 150000 | 2.6645 |
| 1.4432 | 1.46 | 152000 | 2.6070 |
| 1.437 | 1.48 | 154000 | 2.5987 |
| 1.4345 | 1.5 | 156000 | 2.6309 |
| 1.43 | 1.52 | 158000 | 2.5947 |
| 1.4301 | 1.54 | 160000 | 2.5938 |
| 1.4267 | 1.55 | 162000 | 2.6146 |
| 1.426 | 1.57 | 164000 | 2.6519 |
| 1.4193 | 1.59 | 166000 | 2.6163 |
| 1.416 | 1.61 | 168000 | 2.5793 |
| 1.4146 | 1.63 | 170000 | 2.6031 |
| 1.4091 | 1.65 | 172000 | 2.5826 |
| 1.4067 | 1.67 | 174000 | 2.5891 |
| 1.4081 | 1.69 | 176000 | 2.6006 |
| 1.4023 | 1.71 | 178000 | 2.5697 |
| 1.4003 | 1.73 | 180000 | 2.5633 |
| 1.3986 | 1.75 | 182000 | 2.5494 |
| 1.3924 | 1.77 | 184000 | 2.5577 |
| 1.3931 | 1.78 | 186000 | 2.5888 |
| 1.3851 | 1.8 | 188000 | 2.5716 |
| 1.3869 | 1.82 | 190000 | 2.5570 |
| 1.3825 | 1.84 | 192000 | 2.5702 |
| 1.3787 | 1.86 | 194000 | 2.5754 |
| 1.3738 | 1.88 | 196000 | 2.5901 |
| 1.3734 | 1.9 | 198000 | 2.5374 |
| 1.3693 | 1.92 | 200000 | 2.5897 |
| 1.3703 | 1.94 | 202000 | 2.5422 |
| 1.3685 | 1.96 | 204000 | 2.5825 |
| 1.3664 | 1.98 | 206000 | 2.5201 |
| 1.3607 | 2.0 | 208000 | 2.5733 |
| 1.3217 | 2.02 | 210000 | 2.5879 |
| 1.31 | 2.03 | 212000 | 2.5777 |
| 1.3125 | 2.05 | 214000 | 2.5724 |
| 1.3084 | 2.07 | 216000 | 2.5968 |
| 1.3087 | 2.09 | 218000 | 2.5976 |
| 1.3063 | 2.11 | 220000 | 2.5969 |
| 1.3057 | 2.13 | 222000 | 2.6353 |
| 1.3067 | 2.15 | 224000 | 2.6147 |
| 1.3013 | 2.17 | 226000 | 2.5897 |
| 1.3018 | 2.19 | 228000 | 2.5783 |
| 1.2968 | 2.21 | 230000 | 2.6172 |
| 1.2975 | 2.23 | 232000 | 2.6180 |
| 1.2946 | 2.25 | 234000 | 2.6192 |
| 1.299 | 2.26 | 236000 | 2.5895 |
| 1.2896 | 2.28 | 238000 | 2.5682 |
| 1.287 | 2.3 | 240000 | 2.5653 |
| 1.2902 | 2.32 | 242000 | 2.5501 |
| 1.2862 | 2.34 | 244000 | 2.5747 |
| 1.2841 | 2.36 | 246000 | 2.5654 |
| 1.2838 | 2.38 | 248000 | 2.5703 |
| 1.2813 | 2.4 | 250000 | 2.5919 |
| 1.2778 | 2.42 | 252000 | 2.5552 |
| 1.2821 | 2.44 | 254000 | 2.5603 |
| 1.2729 | 2.46 | 256000 | 2.5455 |
| 1.2718 | 2.48 | 258000 | 2.5688 |
| 1.2729 | 2.49 | 260000 | 2.5574 |
| 1.2699 | 2.51 | 262000 | 2.5468 |
| 1.2677 | 2.53 | 264000 | 2.5704 |
| 1.2647 | 2.55 | 266000 | 2.5665 |
| 1.2628 | 2.57 | 268000 | 2.5594 |
| 1.2636 | 2.59 | 270000 | 2.5426 |
| 1.2573 | 2.61 | 272000 | 2.5666 |
| 1.2576 | 2.63 | 274000 | 2.5580 |
| 1.2511 | 2.65 | 276000 | 2.5742 |
| 1.2513 | 2.67 | 278000 | 2.5646 |
| 1.2495 | 2.69 | 280000 | 2.5669 |
| 1.2472 | 2.71 | 282000 | 2.5700 |
| 1.2478 | 2.73 | 284000 | 2.5496 |
| 1.2471 | 2.74 | 286000 | 2.5335 |
| 1.2436 | 2.76 | 288000 | 2.5315 |
| 1.2411 | 2.78 | 290000 | 2.5302 |
| 1.2391 | 2.8 | 292000 | 2.5290 |
| 1.2352 | 2.82 | 294000 | 2.5303 |
| 1.2332 | 2.84 | 296000 | 2.5412 |
| 1.233 | 2.86 | 298000 | 2.5523 |
| 1.2298 | 2.88 | 300000 | 2.5524 |
| 1.2285 | 2.9 | 302000 | 2.5517 |
| 1.2297 | 2.92 | 304000 | 2.5419 |
| 1.2256 | 2.94 | 306000 | 2.5404 |
| 1.2239 | 2.96 | 308000 | 2.5390 |
| 1.2264 | 2.97 | 310000 | 2.5364 |
| 1.2259 | 2.99 | 312000 | 2.5382 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
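Loading mirrors the VN_ja-en_helsinki sketch above; the shortest route is the translation pipeline (untested, and it assumes the Marian config carries usable generation defaults):

```python
from transformers import pipeline

translator = pipeline("translation", model="twieland/LN_ja-en_helsinki")
print(translator("夜が明ける前に出発しよう。"))
```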
erickfm/t5-small-finetuned-bias-sweep-18dcbe1c
295a8d6071e086e3fe670e7fc26bdd3f566b419a
2022-06-07T09:14:47.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-18dcbe1c
1
null
transformers
32,686
Entry not found
prashanth/IndicBART-ibart-hi-to-en
56bb36870ea59b6674124f6235365420cc3df5e9
2022-06-07T09:33:58.000Z
[ "pytorch", "tensorboard", "mbart", "text2text-generation", "dataset:hindi_english_machine_translation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
prashanth
null
prashanth/IndicBART-ibart-hi-to-en
1
null
transformers
32,687
---
tags:
- generated_from_trainer
datasets:
- hindi_english_machine_translation
model-index:
- name: IndicBART-ibart-hi-to-en
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# IndicBART-ibart-hi-to-en

This model is a fine-tuned version of [ai4bharat/IndicBART](https://huggingface.co/ai4bharat/IndicBART) on the hindi_english_machine_translation dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| No log | 1.0 | 157 | 4.4208 | 1.0626 | 20.0 |

### Framework versions

- Transformers 4.19.1
- Pytorch 1.11.0+cu102
- Datasets 1.18.0
- Tokenizers 0.12.1
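A best-guess usage sketch (untested; the upstream ai4bharat/IndicBART card uses language tags such as <2hi>/<2en> and recommends a slow tokenizer with keep_accents=True, and whether this fine-tune preserved those conventions is not stated):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "prashanth/IndicBART-ibart-hi-to-en"
# Tokenizer flags follow the upstream IndicBART card's recommendation.
tokenizer = AutoTokenizer.from_pretrained(
    model_id, do_lower_case=False, use_fast=False, keep_accents=True
)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Upstream convention: "<source sentence> </s> <2src>", decoding from "<2tgt>".
inputs = tokenizer("मैं घर जा रहा हूँ </s> <2hi>", return_tensors="pt")
generated = model.generate(
    **inputs,
    decoder_start_token_id=tokenizer.convert_tokens_to_ids("<2en>"),
    max_new_tokens=32,
)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```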
ishansharma1320/wav2vec2-large-xls-r-300m-finetuned-hindi-common-voice-9-0
d9e503a145087c3fae94085541561ce307db8729
2022-06-07T20:08:08.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
ishansharma1320
null
ishansharma1320/wav2vec2-large-xls-r-300m-finetuned-hindi-common-voice-9-0
1
null
transformers
32,688
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xls-r-300m-finetuned-hindi-common-voice-9-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-large-xls-r-300m-finetuned-hindi-common-voice-9-0

This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7392
- Wer: 1.0141

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 4.42184e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 9.2217 | 3.03 | 400 | 4.0314 | 1.0 |
| 3.2902 | 6.06 | 800 | 2.1356 | 1.0001 |
| 0.9858 | 9.09 | 1200 | 0.8566 | 1.0037 |
| 0.5131 | 12.12 | 1600 | 0.7481 | 1.0074 |
| 0.3781 | 15.15 | 2000 | 0.7437 | 1.008 |
| 0.2998 | 18.18 | 2400 | 0.7310 | 1.0162 |
| 0.2553 | 21.21 | 2800 | 0.7384 | 1.0159 |
| 0.2216 | 24.24 | 3200 | 0.7537 | 1.0100 |
| 0.2048 | 27.27 | 3600 | 0.7392 | 1.0141 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu113
- Datasets 2.2.2
- Tokenizers 0.10.3
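A minimal transcription sketch (untested; note the WER of roughly 1.0 reported above means transcripts will be largely wrong, so treat this as a loading demo; the audio path is a placeholder for a 16 kHz Hindi recording):

```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="ishansharma1320/wav2vec2-large-xls-r-300m-finetuned-hindi-common-voice-9-0",
)

print(asr("sample_hindi.wav"))  # placeholder path
```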
erickfm/t5-small-finetuned-bias-sweep-ddee5fc3
bbe6f1e4e93b8c15bc95d499763284a941ecd81f
2022-06-07T11:40:11.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-ddee5fc3
1
null
transformers
32,689
Entry not found
xfbai/AMRBART-base-finetuned-AMR2.0-AMRParsing
4c27652746c9ea5125332707d971b20ea57eee32
2022-06-07T12:21:05.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "license:mit", "autotrain_compatible" ]
text2text-generation
false
xfbai
null
xfbai/AMRBART-base-finetuned-AMR2.0-AMRParsing
1
null
transformers
32,690
---
license: mit
---
xfbai/AMRBART-base-finetuned-AMR3.0-AMRParsing
9f9dcbf75fa7eebea5203856481a5f6ab05e21f5
2022-06-07T12:52:21.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "license:mit", "autotrain_compatible" ]
text2text-generation
false
xfbai
null
xfbai/AMRBART-base-finetuned-AMR3.0-AMRParsing
1
null
transformers
32,691
---
license: mit
---
erickfm/t5-small-finetuned-bias-sweep-1436a0b1
a63de9ba13a86e3cf12627f499afbc2984f03397
2022-06-07T12:56:53.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-1436a0b1
1
null
transformers
32,692
Entry not found
erickfm/t5-small-finetuned-bias-sweep-d74f666c
5195831bb5dfe47b1a91b5f1021fdb1167ae4bc5
2022-06-07T13:20:27.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-d74f666c
1
null
transformers
32,693
Entry not found
EthanChen0418/task2_macbert_multi_class
56a40e5b839292639a62de1b29404327c9087a70
2022-06-07T14:05:50.000Z
[ "pytorch", "bert", "transformers" ]
null
false
EthanChen0418
null
EthanChen0418/task2_macbert_multi_class
1
null
transformers
32,694
Entry not found
enoriega/rule_learning_margin_test
c4ab5569aca5ca6b5d775231b88d1ea173bb1da7
2022-06-08T05:00:59.000Z
[ "pytorch", "tensorboard", "bert", "dataset:enoriega/odinsynth_dataset", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
null
false
enoriega
null
enoriega/rule_learning_margin_test
1
null
transformers
32,695
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- enoriega/odinsynth_dataset
model-index:
- name: rule_learning_margin_test
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# rule_learning_margin_test

This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the enoriega/odinsynth_dataset dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4104

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- gradient_accumulation_steps: 2000
- total_train_batch_size: 8000
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.6468 | 0.32 | 20 | 0.6191 |
| 0.5185 | 0.64 | 40 | 0.5083 |
| 0.459 | 0.96 | 60 | 0.4521 |
| 0.4352 | 1.29 | 80 | 0.4192 |
| 0.4427 | 1.61 | 100 | 0.4199 |
| 0.4246 | 1.93 | 120 | 0.4131 |
| 0.4301 | 2.26 | 140 | 0.4104 |
| 0.428 | 2.58 | 160 | 0.4099 |
| 0.4161 | 2.9 | 180 | 0.4102 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0
- Datasets 2.2.1
- Tokenizers 0.12.1
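The hyperparameter list maps directly onto transformers TrainingArguments; a sketch of the equivalent configuration is below (the dataset pipeline and the margin objective itself are omitted because the card does not describe them):

```python
from transformers import TrainingArguments

# Mirrors the card: 4 per device x 2000 accumulation steps = effective batch 8000.
args = TrainingArguments(
    output_dir="rule_learning_margin_test",
    learning_rate=5e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=2000,
    num_train_epochs=3.0,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,  # "Native AMP" mixed precision
)
```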
brindap/wav2vec2-large-xls-r-300m-ta-colab
6035ea0a7221d174e10f9ddbfca632133e0f9dea
2022-06-12T06:38:33.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
brindap
null
brindap/wav2vec2-large-xls-r-300m-ta-colab
1
null
transformers
32,696
Entry not found
erickfm/t5-small-finetuned-bias-sweep-ec6be410
892784c992d373a3aeb06d2ed81325290fa6b6c4
2022-06-07T17:07:53.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-ec6be410
1
null
transformers
32,697
Entry not found
erickfm/t5-small-finetuned-bias-sweep-692e0c16
092dc0ce1b62de56e9378126daa52c8b781537ef
2022-06-07T18:27:37.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-692e0c16
1
null
transformers
32,698
Entry not found
erickfm/t5-small-finetuned-bias-sweep-e5d0bea8
397d124e8d31923ec1c478538485357ad99a5e71
2022-06-07T20:17:08.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/t5-small-finetuned-bias-sweep-e5d0bea8
1
null
transformers
32,699
Entry not found