| Column | Type | Length / Range |
| --- | --- | --- |
| `modelId` | string | length 4–112 |
| `sha` | string | length 40 |
| `lastModified` | string | length 24 |
| `tags` | sequence | |
| `pipeline_tag` | string | 29 classes |
| `private` | bool | 1 class |
| `author` | string | length 2–38 |
| `config` | null | |
| `id` | string | length 4–112 |
| `downloads` | float64 | 0–36.8M |
| `likes` | float64 | 0–712 |
| `library_name` | string | 17 classes |
| `__index_level_0__` | int64 | 0–38.5k |
| `readme` | string | length 0–186k |
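The rows below follow this schema, one field per line. As an illustration only (not part of the dump, and the file name `models.parquet` is hypothetical since the table's location is not given here), a table with these columns could be queried with pandas like so:

```python
import pandas as pd

# Hypothetical file name; the dump does not say where the table is stored.
df = pd.read_parquet("models.parquet")

# Public `transformers` models tagged text2text-generation, most downloaded first.
subset = df[
    (df["pipeline_tag"] == "text2text-generation")
    & ~df["private"]
    & (df["library_name"] == "transformers")
].sort_values("downloads", ascending=False)

print(subset[["modelId", "downloads", "likes"]].head())
```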
PSW/low_resource_percent1_max2swap_seed1
5a41a281c6a2c27a788119f391be6f8e69a3a64d
2022-05-12T06:27:40.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent1_max2swap_seed1
1
null
transformers
31,800
Entry not found
PSW/low_resource_percent1_max2swap_seed27
a95562fae23b38c4c2da2754ec69fb02175769df
2022-05-12T06:40:35.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent1_max2swap_seed27
1
null
transformers
31,801
Entry not found
PSW/low_resource_percent10_min2swap_seed1
d7563ff675308be7c7fec18e83e8aa7be1966074
2022-05-12T07:11:08.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent10_min2swap_seed1
1
null
transformers
31,802
Entry not found
PSW/low_resource_percent10_min2swap_seed27
d9896007a9a965391b8004a2e78fc38b866fb90a
2022-05-12T07:27:12.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent10_min2swap_seed27
1
null
transformers
31,803
Entry not found
SebastianS/mt5-finetuned-amazon-en-es-accelerate
01f0dfcb3c541bd6018e009a92c8dfa201abf1ee
2022-05-11T22:10:31.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
SebastianS
null
SebastianS/mt5-finetuned-amazon-en-es-accelerate
1
null
transformers
31,804
Entry not found
PSW/low_resource_percent10_max2swap_seed1
71c515c191fbe6a438ea6aff87415f6f00ebf4c3
2022-05-12T07:59:11.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent10_max2swap_seed1
1
null
transformers
31,805
Entry not found
PSW/low_resource_percent10_max2swap_seed27
a2efd16d231b25ed304b5c50803016bee991e9c7
2022-05-12T08:15:57.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent10_max2swap_seed27
1
null
transformers
31,806
Entry not found
PSW/low_resource_percent10_max2swap_seed42
2c59a9ca1c935516dd357f5a91475d101fa98138
2022-05-12T08:31:53.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent10_max2swap_seed42
1
null
transformers
31,807
Entry not found
PSW/low_resource_percent20_min2swap_seed27
49b4d318220efa834905d703b6b95911a4cb50ec
2022-05-12T09:12:00.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent20_min2swap_seed27
1
null
transformers
31,808
Entry not found
PSW/low_resource_percent20_max2swap_seed1
4d1956a081906723952177be0680606a7e9ad678
2022-05-12T09:53:46.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent20_max2swap_seed1
1
null
transformers
31,809
Entry not found
PSW/low_resource_percent20_max2swap_seed42
e75f5910f82dce973c4020d98254e7e690daa886
2022-05-12T10:29:02.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent20_max2swap_seed42
1
null
transformers
31,810
Entry not found
tanviraumi/summary-note
d1c021c3f6c2b5c645221c8eef2997567ea50c7c
2022-05-11T22:09:53.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "license:mit", "autotrain_compatible" ]
text2text-generation
false
tanviraumi
null
tanviraumi/summary-note
1
null
transformers
31,811
--- license: mit ---
enoriega/kw_pubmed_5000_0.000006
3c508600fdc2be988dd373cb6f1be8b4ccd1defb
2022-05-12T09:50:50.000Z
[ "pytorch", "tensorboard", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
enoriega
null
enoriega/kw_pubmed_5000_0.000006
1
null
transformers
31,812
Entry not found
SherlockGuo/distilbert-base-uncased-finetuned-squad
aaa4418820d68d665ff6b542d6cd76a8f3111de8
2022-05-12T19:32:44.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
SherlockGuo
null
SherlockGuo/distilbert-base-uncased-finetuned-squad
1
null
transformers
31,813
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 3.7677 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 63 | 4.1121 | | No log | 2.0 | 126 | 3.8248 | | No log | 3.0 | 189 | 3.7677 | ### Framework versions - Transformers 4.19.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
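The card above leaves "Intended uses & limitations" open. As a minimal usage sketch (not part of the original card, assuming the standard `transformers` pipeline API and the model id shown above):

```python
from transformers import pipeline

# Load the fine-tuned checkpoint from the Hugging Face Hub.
qa = pipeline("question-answering", model="SherlockGuo/distilbert-base-uncased-finetuned-squad")

result = qa(
    question="What was the model fine-tuned on?",
    context="The model is a fine-tuned version of distilbert-base-uncased on the squad dataset.",
)
print(result["answer"], result["score"])
```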
uhlenbeckmew/distilroberta-base-wiki
7071f6d15cc942a3fe8bec5c46c88c0ec677f3c5
2022-05-12T07:51:34.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
uhlenbeckmew
null
uhlenbeckmew/distilroberta-base-wiki
1
null
transformers
31,814
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilroberta-base-wiki results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilroberta-base-wiki This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0961 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.4333 | 1.0 | 1223 | 2.1885 | | 2.3107 | 2.0 | 2446 | 2.1508 | | 2.2385 | 3.0 | 3669 | 2.0961 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
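Since the card above documents only the training procedure, here is a minimal fill-mask sketch (not from the original card, assuming the standard `transformers` pipeline API):

```python
from transformers import pipeline

# Load the fine-tuned masked-language model from the Hub.
fill = pipeline("fill-mask", model="uhlenbeckmew/distilroberta-base-wiki")

# RoBERTa tokenizers use <mask> as the mask token.
for candidate in fill("The capital of France is <mask>."):
    print(candidate["token_str"], candidate["score"])
```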
cocoshe/bert-base-chinese-finetune-5-trash-email
a2a59ccf7e681ddadc5ca0bbe9c543e4fab1f712
2022-05-12T07:35:56.000Z
[ "pytorch", "jax", "bert", "fill-mask", "zh", "transformers", "autotrain_compatible" ]
fill-mask
false
cocoshe
null
cocoshe/bert-base-chinese-finetune-5-trash-email
1
null
transformers
31,815
--- language: zh ---

# Based on bert-base-chinese

Fine-tuned from `bert-base-chinese` for 5 epochs on the `message80W` dataset (binary spam-email classification).

```python
# Evaluation loop; `model` and `test_loader` come from the fine-tuning setup.
import torch
from tqdm import tqdm

with torch.no_grad():
    model.eval()
    pred_list = []
    label_list = []
    for batch in tqdm(test_loader):
        input_ids, attention_mask, label = batch
        logits = model(input_ids, attention_mask)
        # Collect plain Python ints rather than 0-d tensors.
        pred_list += torch.argmax(logits, dim=-1).tolist()
        label_list += label.tolist()
```

The 800k samples were shuffled and split 8:3 into train and eval sets. Evaluation results are shown below. ![image-20220512153415505](image-20220512153415505.png)
bbaaaa/custom-resnet50d
bd5deae985f8a33c00410bfaa6f28d3c2d3d64a8
2022-05-12T07:57:14.000Z
[ "pytorch", "resnet", "transformers" ]
null
false
bbaaaa
null
bbaaaa/custom-resnet50d
1
null
transformers
31,816
Entry not found
MagicalCat29/hotel_model
14800e9f8656ead51d0998a4b8397f5fa22c0578
2022-05-18T08:29:47.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
MagicalCat29
null
MagicalCat29/hotel_model
1
null
transformers
31,817
Entry not found
huggingtweets/_is_is_are-newscollected
bb9390bb41f2566b1556109bb2743e785e5fa76e
2022-05-12T13:31:28.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/_is_is_are-newscollected
1
null
transformers
31,818
--- language: en thumbnail: http://www.huggingtweets.com/_is_is_are-newscollected/1652362282720/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1522032150358511616/83U7w6rG_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1422393503078920232/EWLgCOmZ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">del co & angelicism01 滲み出るエロス</div> <div style="text-align: center; font-size: 14px;">@_is_is_are-newscollected</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from del co & angelicism01 滲み出るエロス. | Data | del co | angelicism01 滲み出るエロス | | --- | --- | --- | | Tweets downloaded | 364 | 79 | | Retweets | 30 | 13 | | Short tweets | 67 | 3 | | Tweets kept | 267 | 63 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/39vbf25o/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @_is_is_are-newscollected's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/22o9cdjn) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/22o9cdjn/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/_is_is_are-newscollected') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
MeshalAlamr/wav2vec2-xls-r-300m-ar-6
a15e6ac7a1aef309cfe93457ab625411b1835b73
2022-05-17T03:23:06.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
MeshalAlamr
null
MeshalAlamr/wav2vec2-xls-r-300m-ar-6
1
null
transformers
31,819
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-xls-r-300m-ar-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-xls-r-300m-ar-6 This model is a fine-tuned version of [MeshalAlamr/wav2vec2-xls-r-300m-ar-6](https://huggingface.co/MeshalAlamr/wav2vec2-xls-r-300m-ar-6) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 78.2951 - Wer: 0.2040 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 64 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | No log | 1.0 | 85 | 75.3576 | 0.2131 | | No log | 2.0 | 170 | 75.3215 | 0.2150 | | No log | 3.0 | 255 | 75.5332 | 0.2201 | | No log | 4.0 | 340 | 81.2835 | 0.2315 | | 94.75 | 5.0 | 425 | 78.3768 | 0.2422 | | 94.75 | 6.0 | 510 | 82.9389 | 0.2520 | | 94.75 | 7.0 | 595 | 76.7272 | 0.2496 | | 94.75 | 8.0 | 680 | 79.9325 | 0.2506 | | 94.75 | 9.0 | 765 | 82.2568 | 0.2507 | | 124.0193 | 10.0 | 850 | 78.7011 | 0.2415 | | 124.0193 | 11.0 | 935 | 81.2829 | 0.2396 | | 124.0193 | 12.0 | 1020 | 77.2370 | 0.2357 | | 124.0193 | 13.0 | 1105 | 77.4057 | 0.2347 | | 124.0193 | 14.0 | 1190 | 74.4764 | 0.2271 | | 112.7824 | 15.0 | 1275 | 78.7320 | 0.2355 | | 112.7824 | 16.0 | 1360 | 79.0120 | 0.2294 | | 112.7824 | 17.0 | 1445 | 82.3663 | 0.2240 | | 112.7824 | 18.0 | 1530 | 79.2765 | 0.2236 | | 98.8702 | 19.0 | 1615 | 78.1527 | 0.2242 | | 98.8702 | 20.0 | 1700 | 75.7842 | 0.2198 | | 98.8702 | 21.0 | 1785 | 78.2980 | 0.2217 | | 98.8702 | 22.0 | 1870 | 79.3180 | 0.2168 | | 98.8702 | 23.0 | 1955 | 77.7381 | 0.2155 | | 84.537 | 24.0 | 2040 | 78.1512 | 0.2131 | | 84.537 | 25.0 | 2125 | 80.4068 | 0.2116 | | 84.537 | 26.0 | 2210 | 75.5718 | 0.2075 | | 84.537 | 27.0 | 2295 | 78.4438 | 0.2078 | | 84.537 | 28.0 | 2380 | 79.6891 | 0.2086 | | 74.4149 | 29.0 | 2465 | 77.9115 | 0.2069 | | 74.4149 | 30.0 | 2550 | 78.2951 | 0.2040 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0 - Datasets 1.18.4 - Tokenizers 0.11.6
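The card above reports a 0.2040 WER but no usage section. A minimal transcription sketch (not from the original card, assuming the standard `transformers` pipeline API):

```python
from transformers import pipeline

# Load the fine-tuned Arabic checkpoint from the Hub.
asr = pipeline("automatic-speech-recognition", model="MeshalAlamr/wav2vec2-xls-r-300m-ar-6")

# "speech.wav" is a placeholder path; wav2vec2-style models expect 16 kHz mono audio.
print(asr("speech.wav")["text"])
```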
vives/distilbert-base-uncased-finetuned-imdb
9b494fe2266c40d5d1fd32ecb6a46b268b41e1a6
2022-05-12T19:31:53.000Z
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "dataset:imdb", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
vives
null
vives/distilbert-base-uncased-finetuned-imdb
1
null
transformers
31,820
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: distilbert-base-uncased-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 2.4721 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7086 | 1.0 | 157 | 2.4897 | | 2.5796 | 2.0 | 314 | 2.4230 | | 2.5269 | 3.0 | 471 | 2.4354 | ### Framework versions - Transformers 4.19.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
Dizzykong/gpt2-large-quests-5
d21d89ea3607416ac47d50d53cc0481bad8af509
2022-05-13T23:50:56.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
Dizzykong
null
Dizzykong/gpt2-large-quests-5
1
null
transformers
31,821
Entry not found
Dedemg1988/DialoGPT-small-michaelscott
b6a336e764b6539fa8952bee73e5dd7b788d40b1
2022-05-12T18:26:37.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
Dedemg1988
null
Dedemg1988/DialoGPT-small-michaelscott
1
null
transformers
31,822
--- tags: - conversational --- # Michael Scott DialoGPT Model
subhasisj/es-TAPT-MLM-MiniLM
e3a85a0679cd876f737336f1c213dc429d179158
2022-05-12T20:21:00.000Z
[ "pytorch", "tensorboard", "bert", "fill-mask", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
fill-mask
false
subhasisj
null
subhasisj/es-TAPT-MLM-MiniLM
1
null
transformers
31,823
--- tags: - generated_from_trainer model-index: - name: es-TAPT-MLM-MiniLM results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # es-TAPT-MLM-MiniLM This model is a fine-tuned version of [subhasisj/MiniLMv2-qa-encoder](https://huggingface.co/subhasisj/MiniLMv2-qa-encoder) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
subhasisj/de-finetuned-squad-qa-minilmv2-16
d6a584c5855e19fba3b833c6567d78466a8bc078
2022-05-12T22:27:23.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
question-answering
false
subhasisj
null
subhasisj/de-finetuned-squad-qa-minilmv2-16
1
null
transformers
31,824
--- tags: - generated_from_trainer model-index: - name: de-finetuned-squad-qa-minilmv2-16 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # de-finetuned-squad-qa-minilmv2-16 This model is a fine-tuned version of [subhasisj/de-TAPT-MLM-MiniLM](https://huggingface.co/subhasisj/de-TAPT-MLM-MiniLM) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.5756 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.6022 | 1.0 | 671 | 2.0770 | | 1.9783 | 2.0 | 1342 | 1.6511 | | 1.4059 | 3.0 | 2013 | 1.5939 | | 1.2989 | 4.0 | 2684 | 1.5772 | | 1.2522 | 5.0 | 3355 | 1.5756 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
aajrami/bert-sr-base
c43b4b7aafa5abf92880489dd4b7b574f5bf73ed
2022-06-01T11:52:00.000Z
[ "pytorch", "roberta", "feature-extraction", "arxiv:2203.10415", "transformers", "bert", "license:cc-by-4.0" ]
feature-extraction
false
aajrami
null
aajrami/bert-sr-base
1
null
transformers
31,825
--- tags: - bert license: cc-by-4.0 --- ## bert-sr-base is a BERT base Language Model with a **shuffle + random** pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://arxiv.org/abs/2203.10415) ## License CC BY 4.0 ## Citation If you use this model, please cite the following paper: ``` @inproceedings{alajrami2022does, title={How does the pre-training objective affect what large language models learn about linguistic properties?}, author={Alajrami, Ahmed and Aletras, Nikolaos}, booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)}, pages={131--147}, year={2022} } ```
aajrami/bert-fc-base
91e1a793eb910f576d852ef7a739c4ed93cb13c7
2022-06-01T11:52:44.000Z
[ "pytorch", "roberta", "feature-extraction", "arxiv:2203.10415", "transformers", "bert", "license:cc-by-4.0" ]
feature-extraction
false
aajrami
null
aajrami/bert-fc-base
1
null
transformers
31,826
--- tags: - bert license: cc-by-4.0 --- ## bert-fc-base is a BERT base Language Model with a **first character** prediction pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://arxiv.org/abs/2203.10415) ## License CC BY 4.0 ## Citation If you use this model, please cite the following paper: ``` @inproceedings{alajrami2022does, title={How does the pre-training objective affect what large language models learn about linguistic properties?}, author={Alajrami, Ahmed and Aletras, Nikolaos}, booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)}, pages={131--147}, year={2022} } ```
aajrami/bert-rand-base
d9ebdee44ea1d750be287de49c53deed4d761be7
2022-06-01T11:53:15.000Z
[ "pytorch", "roberta", "feature-extraction", "arxiv:2203.10415", "transformers", "bert", "license:cc-by-4.0" ]
feature-extraction
false
aajrami
null
aajrami/bert-rand-base
1
null
transformers
31,827
--- tags: - bert license: cc-by-4.0 --- ## bert-rand-base is a BERT base Language Model with a **random** pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://arxiv.org/abs/2203.10415) ## License CC BY 4.0 ## Citation If you use this model, please cite the following paper: ``` @inproceedings{alajrami2022does, title={How does the pre-training objective affect what large language models learn about linguistic properties?}, author={Alajrami, Ahmed and Aletras, Nikolaos}, booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)}, pages={131--147}, year={2022} } ```
kabelomalapane/en_zu_ukuxhumana_model
1aa33534cf94586893f18afe8bbe66e4790e8dd1
2022-05-13T06:09:53.000Z
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "translation", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
translation
false
kabelomalapane
null
kabelomalapane/en_zu_ukuxhumana_model
1
null
transformers
31,828
--- license: apache-2.0 tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: en_zu_ukuxhumana_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # en_zu_ukuxhumana_model This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-mul](https://huggingface.co/Helsinki-NLP/opus-mt-en-mul) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0772 - Bleu: 7.6322 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.2 - Datasets 1.18.3 - Tokenizers 0.11.0
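The card above gives the BLEU score but no usage example. A minimal translation sketch (not from the original card; the model name suggests English-to-Zulu, and the standard `transformers` pipeline API is assumed):

```python
from transformers import pipeline

# Load the fine-tuned Marian checkpoint from the Hub.
translator = pipeline("translation", model="kabelomalapane/en_zu_ukuxhumana_model")

print(translator("How are you today?")[0]["translation_text"])
```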
lilitket/20220513-044812
cc5e299e2dbad77ec0537613cf51609e5c30fdbd
2022-05-13T05:06:02.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
lilitket
null
lilitket/20220513-044812
1
null
transformers
31,829
Entry not found
lilitket/20220513-050608
fbac826eec4efae8dd30992ad7787f2b00b632bb
2022-05-13T04:57:21.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
lilitket
null
lilitket/20220513-050608
1
null
transformers
31,830
Entry not found
anas-awadalla/roberta-large-data-seed-0
4d8c0184f5d469f2da39fc1285627722b3273a9b
2022-05-13T04:07:24.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-data-seed-0
1
null
transformers
31,831
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-data-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-data-seed-0 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 24 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
Khalsuu/filipino-wav2vec2-l-xls-r-300m-official
06e33a5630c543cffb68a011fef6eea64dcc09d8
2022-05-13T05:58:50.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:filipino_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Khalsuu
null
Khalsuu/filipino-wav2vec2-l-xls-r-300m-official
1
null
transformers
31,832
--- license: apache-2.0 tags: - generated_from_trainer datasets: - filipino_voice model-index: - name: filipino-wav2vec2-l-xls-r-300m-official results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # filipino-wav2vec2-l-xls-r-300m-official This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the filipino_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.4672 - Wer: 0.2922 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 3.3671 | 2.09 | 400 | 0.5584 | 0.5987 | | 0.48 | 4.19 | 800 | 0.4244 | 0.4195 | | 0.2796 | 6.28 | 1200 | 0.3742 | 0.3765 | | 0.1916 | 8.38 | 1600 | 0.4291 | 0.3667 | | 0.1463 | 10.47 | 2000 | 0.3745 | 0.3415 | | 0.1165 | 12.57 | 2400 | 0.4472 | 0.3407 | | 0.0955 | 14.66 | 2800 | 0.4269 | 0.3290 | | 0.0823 | 16.75 | 3200 | 0.4608 | 0.3475 | | 0.0709 | 18.85 | 3600 | 0.4706 | 0.3281 | | 0.0603 | 20.94 | 4000 | 0.4380 | 0.3183 | | 0.0527 | 23.04 | 4400 | 0.4473 | 0.3067 | | 0.0449 | 25.13 | 4800 | 0.4550 | 0.3029 | | 0.041 | 27.23 | 5200 | 0.4671 | 0.3020 | | 0.0358 | 29.32 | 5600 | 0.4672 | 0.2922 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
eat-great-food/t5-efficient-tiny-d3st-t5-efficient-tiny
42992f7edc3bfb47344b36d8aaa971b88919c5ef
2022-05-13T04:42:32.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
eat-great-food
null
eat-great-food/t5-efficient-tiny-d3st-t5-efficient-tiny
1
null
transformers
31,833
Entry not found
anas-awadalla/roberta-large-data-seed-2
c2678c0b3150dfb8a370a9621e14b7ef55bed8b7
2022-05-14T03:54:46.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-data-seed-2
1
null
transformers
31,834
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-data-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-data-seed-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
anas-awadalla/roberta-large-data-seed-4
a1497606b3855c18c4a2d4853d721b4081430f56
2022-05-13T06:24:05.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-data-seed-4
1
null
transformers
31,835
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-data-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-data-seed-4 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 24 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
jasonyim2/xlm-roberta-base-finetuned-panx-de
1838d073891a67cf315ca95aecabdc13c16b4b96
2022-05-13T06:04:43.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
jasonyim2
null
jasonyim2/xlm-roberta-base-finetuned-panx-de
1
null
transformers
31,836
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8620945214069894 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1372 - F1: 0.8621 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2575 | 1.0 | 525 | 0.1621 | 0.8292 | | 0.1287 | 2.0 | 1050 | 0.1378 | 0.8526 | | 0.0831 | 3.0 | 1575 | 0.1372 | 0.8621 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
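The card above reports F1 on PAN-X.de but no usage section. A minimal German NER sketch (not from the original card, assuming the standard `transformers` pipeline API):

```python
from transformers import pipeline

# Load the German PAN-X checkpoint from the Hub and group word pieces
# into whole entities.
ner = pipeline(
    "token-classification",
    model="jasonyim2/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",
)

for entity in ner("Angela Merkel besuchte das Werk von Siemens in München."):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```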
Milanmg/xlm-roberta-large
81351339980d7331f1e64a693743ed1bc83b69d4
2022-05-13T06:49:55.000Z
[ "pytorch", "jax", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
Milanmg
null
Milanmg/xlm-roberta-large
1
null
transformers
31,837
Entry not found
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_1_kldiv
c89ae8e750b5cd089fc2aeea3d6d508f42cd240b
2022-05-13T09:50:26.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
AnonymousSub
null
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_1_kldiv
1
null
transformers
31,838
Entry not found
PSW/cnndm_0.1percent_maxsimins_seed1
e5d7a93ddff360c88330f8fab6120bcde37e6ce7
2022-05-15T21:49:43.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.1percent_maxsimins_seed1
1
null
transformers
31,839
Entry not found
PSW/cnndm_0.1percent_randomsimins_seed1
aae26c8456c1833f33316a1941f765d24414b841
2022-05-16T01:08:36.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.1percent_randomsimins_seed1
1
null
transformers
31,840
Entry not found
SreyanG-NVIDIA/bert-base-cased-finetuned-squad
177e1edd62052a8c1e81e49e7f4a00ff3bead655
2022-05-16T08:39:41.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
SreyanG-NVIDIA
null
SreyanG-NVIDIA/bert-base-cased-finetuned-squad
1
null
transformers
31,841
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-cased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-cased-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.0848 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.0337 | 1.0 | 5546 | 1.0150 | | 0.7546 | 2.0 | 11092 | 1.0015 | | 0.5537 | 3.0 | 16638 | 1.0848 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
Davincilee/closure_system_door_inne-roberta-base
187cd898e23cb07a51768ba338c6b730b4c6ac47
2022-05-13T14:24:57.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
fill-mask
false
Davincilee
null
Davincilee/closure_system_door_inne-roberta-base
1
null
transformers
31,842
--- license: mit tags: - generated_from_trainer model-index: - name: closure_system_door_inne-roberta-base results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # closure_system_door_inne-roberta-base This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.6038 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 6 - eval_batch_size: 6 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.3302 | 1.0 | 3 | 1.6837 | ### Framework versions - Transformers 4.19.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
anas-awadalla/roberta-large-initialization-seed-0
98f112d14ac735011f5da126aa9189dfd3ac9f32
2022-05-13T16:46:52.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-initialization-seed-0
1
null
transformers
31,843
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-initialization-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-initialization-seed-0 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 0 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 24 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
PSW/cnndm_0.1percent_min2swap_seed1
6297b6bd531f7a35cb4ad2d92d0415e028515f2d
2022-05-16T07:47:36.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.1percent_min2swap_seed1
1
null
transformers
31,844
Entry not found
PSW/cnndm_0.1percent_max2swap_seed1
1012aa699de947f7f76eee749bae85bab990f11a
2022-05-16T11:07:58.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.1percent_max2swap_seed1
1
null
transformers
31,845
Entry not found
anas-awadalla/roberta-large-initialization-seed-2
395f26a5a817520721a466c87796e219f519e513
2022-05-13T18:58:43.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-initialization-seed-2
1
null
transformers
31,846
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-initialization-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-initialization-seed-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 2 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 24 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
lilitket/20220513-212939
e7037cd1b15662ad5fab34f92f10d8180cffc7c2
2022-05-14T04:20:59.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
lilitket
null
lilitket/20220513-212939
1
null
transformers
31,847
Entry not found
PSW/cnndm_0.5percent_minsimdel_seed1
7e1bdae241a1d1a58dc0d12524253daa13e06f5b
2022-05-16T17:54:22.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.5percent_minsimdel_seed1
1
null
transformers
31,848
Entry not found
anas-awadalla/roberta-large-initialization-seed-4
aab8fcf232cb5ed672c8ed288570abeed6604ccc
2022-05-13T21:07:51.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-initialization-seed-4
1
null
transformers
31,849
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-initialization-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-initialization-seed-4 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 24 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
PSW/cnndm_0.5percent_maxsimdel_seed1
f47d5c8febdf4fe43d100f61f8a3e555b0662f1e
2022-05-16T21:26:55.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.5percent_maxsimdel_seed1
1
null
transformers
31,850
Entry not found
SebastianS/codeparrot-ds
d8399ea7694bfde780a038db5d4d96334ee859c4
2022-05-13T22:28:22.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-generation
false
SebastianS
null
SebastianS/codeparrot-ds
1
null
transformers
31,851
--- license: mit tags: - generated_from_trainer model-index: - name: codeparrot-ds results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # codeparrot-ds This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.4905 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 300 - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.7149 | 0.85 | 1000 | 2.4905 | ### Framework versions - Transformers 4.19.1 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
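The card above documents training but not generation. A minimal sketch (not from the original card; the training corpus is undocumented, so the Python-style prompt is only an assumption by analogy with other codeparrot models, and the standard `transformers` pipeline API is assumed):

```python
from transformers import pipeline

# Load the GPT-2-based checkpoint from the Hub.
generator = pipeline("text-generation", model="SebastianS/codeparrot-ds")

print(generator("def fibonacci(n):", max_new_tokens=40)[0]["generated_text"])
```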
PSW/cnndm_0.5percent_randomsimdel_seed1
f52767130c20dcf96c7b932628bc647952629970
2022-05-17T00:59:45.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.5percent_randomsimdel_seed1
1
null
transformers
31,852
Entry not found
SNCannon/DialoGPT-medium-merc
6c813cc5d20ea923e2bd9a5abd45f3a0dcb60435
2022-05-13T23:20:35.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
SNCannon
null
SNCannon/DialoGPT-medium-merc
1
null
transformers
31,853
--- tags: - conversational --- # Corroded MercBot DialoGPT Model
PSW/cnndm_0.5percent_minsimins_seed1
cb1e05d84b4c2ea4b909f76e25957debb9eca0ab
2022-05-17T04:33:24.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.5percent_minsimins_seed1
1
null
transformers
31,854
Entry not found
SebastianS/codeparrot-ds-accelerate
c992e9427ad6c85114418be94e8aa764a00934dd
2022-05-14T01:28:23.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
SebastianS
null
SebastianS/codeparrot-ds-accelerate
1
null
transformers
31,855
Entry not found
PSW/cnndm_0.5percent_maxsimins_seed1
6496cdf224495c28f6fe4dd5c8b1467f3d396dd8
2022-05-17T08:06:01.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_0.5percent_maxsimins_seed1
1
null
transformers
31,856
Entry not found
anas-awadalla/spanbert-base-finetuned-squad-r3f
fa3262da2e61ceeb21ca9e1b565a597da3158497
2022-05-14T14:00:24.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/spanbert-base-finetuned-squad-r3f
1
null
transformers
31,857
Entry not found
anas-awadalla/bert-base-cased-finetuned-squad-r3f
f4b594bd087b9b5fb3df653228e99eb4fb5f992d
2022-05-14T08:07:36.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-cased-finetuned-squad-r3f
1
null
transformers
31,858
Entry not found
anas-awadalla/roberta-base-finetuned-squad-r3f
ef64d9435ef44354eda7239966371f5db85dd0e6
2022-05-14T11:00:05.000Z
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-finetuned-squad-r3f
1
null
transformers
31,859
Entry not found
PSW/cnndm_10percent_minsimdel_seed1
e348b65ef19a22b8dcf3842fbf1dc62ab54f317d
2022-05-18T07:08:02.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_10percent_minsimdel_seed1
1
null
transformers
31,860
Entry not found
PSW/cnndm_10percent_maxsimdel_seed1
e37d115ec7f7ac3fa42da5c47994a9e1f1f65485
2022-05-14T12:23:58.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_10percent_maxsimdel_seed1
1
null
transformers
31,861
Entry not found
lilitket/20220514-171236
d8ae787b925c1932affccb2562fdb26bc0dc07d3
2022-05-15T01:57:37.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
lilitket
null
lilitket/20220514-171236
1
null
transformers
31,862
Entry not found
huggingtweets/vrsoloviev
504bee05e4564e4764114501be74aa33021e8125
2022-05-14T13:25:22.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/vrsoloviev
1
null
transformers
31,863
--- language: en thumbnail: http://www.huggingtweets.com/vrsoloviev/1652534655103/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1170975520458203136/4eDVAZZa_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Vladimir Soloviev</div> <div style="text-align: center; font-size: 14px;">@vrsoloviev</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Vladimir Soloviev. | Data | Vladimir Soloviev | | --- | --- | | Tweets downloaded | 3250 | | Retweets | 9 | | Short tweets | 29 | | Tweets kept | 3212 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/elfi2jwn/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @vrsoloviev's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2m2arnt6) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2m2arnt6/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/vrsoloviev') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
PSW/cnndm_10percent_randomsimdel_seed1
34e336fa328cc20e868d2436006ebad3cfd47391
2022-05-14T15:36:29.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/cnndm_10percent_randomsimdel_seed1
1
null
transformers
31,864
Entry not found
mubikan/xlm-roberta-base-finetuned-panx-de
6c10d1c4d04adc25f96852f98f4e8ac05ef305bb
2022-05-15T11:48:08.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
mubikan
null
mubikan/xlm-roberta-base-finetuned-panx-de
1
null
transformers
31,865
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8588964027959312 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1383 - F1: 0.8589 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2631 | 1.0 | 525 | 0.1596 | 0.8218 | | 0.1296 | 2.0 | 1050 | 0.1353 | 0.8479 | | 0.0821 | 3.0 | 1575 | 0.1383 | 0.8589 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.exclusive.seed_42
81503b1d3a4dc16846d419f9973302640fb39051
2022-05-14T17:25:19.000Z
[ "pytorch", "bert", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.exclusive.seed_42
1
null
transformers
31,866
Entry not found
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.exclusive.seed_77
aaf69574875220b7722574d4d8c3a917f7c0cbf5
2022-05-14T17:35:14.000Z
[ "pytorch", "bert", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.exclusive.seed_77
1
null
transformers
31,867
Entry not found
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.exclusive.seed_88
cf22be2a5524f740875d5c1260539878b410790c
2022-05-14T17:40:05.000Z
[ "pytorch", "bert", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.exclusive.seed_88
1
null
transformers
31,868
Entry not found
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.exclusive.seed_99
37f2b93d61001c5a31360abf82c6dc207cb20fb2
2022-05-14T17:44:57.000Z
[ "pytorch", "bert", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.exclusive.seed_99
1
null
transformers
31,869
Entry not found
huggingtweets/spacex
779d9d0ee36d354f9ff5a18ea7bb24aa8419b411
2022-05-14T18:02:18.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/spacex
1
null
transformers
31,870
---
language: en
thumbnail: http://www.huggingtweets.com/spacex/1652551333667/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---

<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1082744382585856001/rH_k3PtQ_400x400.jpg')">
</div>
<div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
<div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">SpaceX</div>
<div style="text-align: center; font-size: 14px;">@spacex</div>
</div>

I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).

Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!

## How does it work?

The model uses the following pipeline.

![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).

## Training data

The model was trained on tweets from SpaceX.

| Data | SpaceX |
| --- | --- |
| Tweets downloaded | 3250 |
| Retweets | 539 |
| Short tweets | 157 |
| Tweets kept | 2554 |

[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/562aigw4/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.

## Training procedure

The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @spacex's tweets.

Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3b58vg41) for full transparency and reproducibility.

At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3b58vg41/artifacts) is logged and versioned.

## How to use

You can use this model directly with a pipeline for text generation:

```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/spacex')
generator("My dream is", num_return_sequences=5)
```

## Limitations and bias

The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).

In addition, the data present in the user's tweets further affects the text generated by the model.

## About

*Built by Boris Dayma*

[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)

For more details, visit the project repository.

[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.inclusive.seed_42
6d2bab5dc08484809eb3b5d1fbd34324e870c545
2022-05-14T17:59:35.000Z
[ "pytorch", "bert", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.inclusive.seed_42
1
null
transformers
31,871
Entry not found
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.inclusive.seed_66
cee15c54b46ad234f385b7ba48102163ad1e072c
2022-05-14T18:04:21.000Z
[ "pytorch", "bert", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.inclusive.seed_66
1
null
transformers
31,872
Entry not found
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.inclusive.seed_77
d1131e2b7e6b3d01e5c28f4ea0543894b758198d
2022-05-14T18:09:16.000Z
[ "pytorch", "bert", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.inclusive.seed_77
1
null
transformers
31,873
Entry not found
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.inclusive.seed_88
a14ea19bc1ea9a8f3ecfb8fe1fd8b12488b0b13f
2022-05-14T18:14:30.000Z
[ "pytorch", "bert", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB-challenge.sa.2-class.inclusive.seed_88
1
null
transformers
31,874
Entry not found
anas-awadalla/splinter-large-few-shot-k-16-finetuned-squad-seed-0
d3b844610d162e0a3cdd52ed34b46aca65b8da89
2022-05-14T19:26:10.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-16-finetuned-squad-seed-0
1
null
transformers
31,875
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-16-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-16-finetuned-squad-seed-0

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-16-finetuned-squad-seed-2
e69eacea6b4f262ece3845d913fddaea9f1e26f7
2022-05-14T19:32:24.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-16-finetuned-squad-seed-2
1
null
transformers
31,876
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-16-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-16-finetuned-squad-seed-2

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
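None of the few-shot SQuAD cards in this batch include a usage example. A minimal sketch using the standard question-answering pipeline, shown with the roberta checkpoint from the card above; the question and context are invented, and whether the sibling splinter-qass checkpoints load identically through the same pipeline is an assumption:

```python
from transformers import pipeline

# Any of the few-shot SQuAD checkpoints in this batch could be substituted here.
qa = pipeline(
    "question-answering",
    model="anas-awadalla/roberta-large-few-shot-k-16-finetuned-squad-seed-2",
)

# SQuAD-style extractive QA: the answer is a span copied out of the context.
result = qa(
    question="How many examples were used for fine-tuning?",
    context="The checkpoint was fine-tuned on SQuAD with only 16 labeled examples.",
)
print(result["answer"], result["score"])
```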
anas-awadalla/splinter-large-few-shot-k-16-finetuned-squad-seed-2
85325f605c24450c7b61f12f22ae363763952a05
2022-05-14T19:36:09.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-16-finetuned-squad-seed-2
1
null
transformers
31,877
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-16-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-16-finetuned-squad-seed-2

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-16-finetuned-squad-seed-4
8ac859561ad869a69ac78baa7b225cbcf7f559d4
2022-05-14T19:46:07.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-16-finetuned-squad-seed-4
1
null
transformers
31,878
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-16-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-16-finetuned-squad-seed-4

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-32-finetuned-squad-seed-0
14b9958dc14381063f00d99aa194b72298797c21
2022-05-14T19:53:09.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-32-finetuned-squad-seed-0
1
null
transformers
31,879
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-32-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-32-finetuned-squad-seed-0

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-32-finetuned-squad-seed-0
c7789c05c7d2047715cd1592ad48ebb7552982f5
2022-05-14T19:56:59.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-32-finetuned-squad-seed-0
1
null
transformers
31,880
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-32-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-32-finetuned-squad-seed-0

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-32-finetuned-squad-seed-2
8193f064cc285fa93a9159089a69ae045daa0803
2022-05-14T20:03:31.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-32-finetuned-squad-seed-2
1
null
transformers
31,881
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-32-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-32-finetuned-squad-seed-2

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-32-finetuned-squad-seed-2
c3d7bde66dbb33e79e7e3d54beed9028cc4f01b9
2022-05-14T20:07:26.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-32-finetuned-squad-seed-2
1
null
transformers
31,882
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-32-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-32-finetuned-squad-seed-2

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-32-finetuned-squad-seed-4
9d6259f5c3812e3bd00ae5d06f4f4a6a0f541f4f
2022-05-14T20:13:53.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-32-finetuned-squad-seed-4
1
null
transformers
31,883
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-32-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-32-finetuned-squad-seed-4

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-64-finetuned-squad-seed-0
3e6f70073ea2ead6b5bbcfdf92ba50ec10092355
2022-05-14T20:28:59.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-64-finetuned-squad-seed-0
1
null
transformers
31,884
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-64-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-64-finetuned-squad-seed-0

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-64-finetuned-squad-seed-2
5df711b6fcfec52fa100dfd17b570f1419d4eeca
2022-05-14T20:35:00.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-64-finetuned-squad-seed-2
1
null
transformers
31,885
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-64-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-64-finetuned-squad-seed-2

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-64-finetuned-squad-seed-2
4d5efbc83b1e0568d9c39755c0e87b8b7d682ea5
2022-05-14T20:39:24.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-64-finetuned-squad-seed-2
1
null
transformers
31,886
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-64-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-64-finetuned-squad-seed-2

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-64-finetuned-squad-seed-4
3a37390f7785120c2ec8f78e3fbb8fe393925cd6
2022-05-14T20:46:57.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-64-finetuned-squad-seed-4
1
null
transformers
31,887
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-64-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-64-finetuned-squad-seed-4

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-64-finetuned-squad-seed-4
233a2811e8529c4fb1c8f2a3bc9e5c638ad2de02
2022-05-14T20:49:53.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-64-finetuned-squad-seed-4
1
null
transformers
31,888
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-64-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-64-finetuned-squad-seed-4

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-128-finetuned-squad-seed-0
21e51e8a9e2f887ad6d632e2498bdf88a1eae2f4
2022-05-14T21:00:55.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-128-finetuned-squad-seed-0
1
null
transformers
31,889
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-128-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-128-finetuned-squad-seed-0

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-128-finetuned-squad-seed-2
2507aee0c1fe358804904e3e1b43914c077a19eb
2022-05-14T21:08:49.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-128-finetuned-squad-seed-2
1
null
transformers
31,890
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-128-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-128-finetuned-squad-seed-2

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-128-finetuned-squad-seed-2
0f96fd8f238084d963e53a2efb5b5616ee2d5c0b
2022-05-14T21:14:58.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-128-finetuned-squad-seed-2
1
null
transformers
31,891
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-128-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-128-finetuned-squad-seed-2

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-128-finetuned-squad-seed-4
88c9bba9380ee7126179f8d5aa0eb7d615f3eae6
2022-05-14T21:28:38.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-128-finetuned-squad-seed-4
1
null
transformers
31,892
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-128-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-128-finetuned-squad-seed-4

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-256-finetuned-squad-seed-0
d10176044b9678920ca7bcc1cafa85ecfcbefbf0
2022-05-14T21:40:52.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-256-finetuned-squad-seed-0
1
null
transformers
31,893
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-256-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-256-finetuned-squad-seed-0

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-256-finetuned-squad-seed-0
551f50da446e1efbf0ae3e884118c615af31d254
2022-05-14T21:40:29.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-256-finetuned-squad-seed-0
1
null
transformers
31,894
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-256-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-256-finetuned-squad-seed-0

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
prashanth/mbart-large-cc25-ind_finetun-hi-to-en
02bb99aaea341ec7f3fa684d45cdfaf44b1b143b
2022-05-14T22:03:05.000Z
[ "pytorch", "tensorboard", "mbart", "text2text-generation", "dataset:hindi_english_machine_translation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
prashanth
null
prashanth/mbart-large-cc25-ind_finetun-hi-to-en
1
null
transformers
31,895
---
tags:
- generated_from_trainer
datasets:
- hindi_english_machine_translation
metrics:
- bleu
model-index:
- name: mbart-large-cc25-ind_finetun-hi-to-en
  results:
  - task:
      name: Sequence-to-sequence Language Modeling
      type: text2text-generation
    dataset:
      name: hindi_english_machine_translation
      type: hindi_english_machine_translation
      args: hi-en
    metrics:
    - name: Bleu
      type: bleu
      value: 15.9135
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mbart-large-cc25-ind_finetun-hi-to-en

This model is a fine-tuned version of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on the hindi_english_machine_translation dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4042
- Bleu: 15.9135
- Gen Len: 70.155

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu    | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| 2.3854        | 1.0   | 620  | 1.4042          | 15.9135 | 70.155  |

### Framework versions

- Transformers 4.19.1
- Pytorch 1.11.0+cu102
- Datasets 1.18.0
- Tokenizers 0.12.1
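The card above reports BLEU but no inference example. A minimal Hindi-to-English sketch; the `hi_IN`/`en_XX` language codes follow the usual mbart-large-cc25 convention and, like the example sentence, are assumptions rather than something stated in the card:

```python
from transformers import MBartForConditionalGeneration, MBartTokenizer

ckpt = "prashanth/mbart-large-cc25-ind_finetun-hi-to-en"
# src_lang follows the mbart-large-cc25 language-code convention (assumed for this fine-tune).
tokenizer = MBartTokenizer.from_pretrained(ckpt, src_lang="hi_IN")
model = MBartForConditionalGeneration.from_pretrained(ckpt)

inputs = tokenizer("मौसम आज बहुत अच्छा है।", return_tensors="pt")  # "The weather is very nice today."
generated = model.generate(
    **inputs,
    decoder_start_token_id=tokenizer.lang_code_to_id["en_XX"],  # start decoding in English
    max_length=64,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```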
anas-awadalla/splinter-large-few-shot-k-256-finetuned-squad-seed-2
d251ea1269ab55b82b6853a0e3df26f7601b2839
2022-05-14T21:52:18.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-256-finetuned-squad-seed-2
1
null
transformers
31,896
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-256-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-256-finetuned-squad-seed-2

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-256-finetuned-squad-seed-2
d05a49267000c564ac620302d16b104990e0f001
2022-05-14T21:51:44.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-256-finetuned-squad-seed-2
1
null
transformers
31,897
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-256-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-256-finetuned-squad-seed-2

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/splinter-large-few-shot-k-256-finetuned-squad-seed-4
8ee2b4dcbf66cad3ef928749dc805c57a877d500
2022-05-14T22:03:24.000Z
[ "pytorch", "splinter", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/splinter-large-few-shot-k-256-finetuned-squad-seed-4
1
null
transformers
31,898
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: splinter-large-few-shot-k-256-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# splinter-large-few-shot-k-256-finetuned-squad-seed-4

This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
anas-awadalla/roberta-large-few-shot-k-512-finetuned-squad-seed-0
3df538bf60ba8ebee03ea572b4613bc8b3d99acb
2022-05-14T22:17:30.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-large-few-shot-k-512-finetuned-squad-seed-0
1
null
transformers
31,899
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-large-few-shot-k-512-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-large-few-shot-k-512-finetuned-squad-seed-0

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 12
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results

### Framework versions

- Transformers 4.20.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
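The hyperparameter lists in these few-shot cards map directly onto `transformers` `TrainingArguments`. A sketch of how the settings from the card above could be reproduced; this is a reconstruction from the listed values, not the author's actual training script, and the model/dataset loading is elided:

```python
from transformers import TrainingArguments

# Mirrors the hyperparameters listed in the card above (roberta-large, k=512, seed 0).
# Adam betas=(0.9, 0.999) and epsilon=1e-08 are the TrainingArguments defaults.
args = TrainingArguments(
    output_dir="roberta-large-few-shot-k-512-finetuned-squad-seed-0",
    learning_rate=3e-5,
    per_device_train_batch_size=12,
    per_device_eval_batch_size=8,
    seed=0,
    lr_scheduler_type="linear",  # linear decay after warmup
    warmup_ratio=0.1,            # lr_scheduler_warmup_ratio in the card
    num_train_epochs=10.0,
)
```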