| column | dtype | stats |
|:--|:--|:--|
| modelId | string | length 4 to 112 |
| sha | string | length 40 (fixed) |
| lastModified | string | length 24 (fixed) |
| tags | list | - |
| pipeline_tag | string | 29 classes |
| private | bool | 1 class |
| author | string | length 2 to 38 |
| config | null | - |
| id | string | length 4 to 112 |
| downloads | float64 | 0 to 36.8M |
| likes | float64 | 0 to 712 |
| library_name | string | 17 classes |
| __index_level_0__ | int64 | 0 to 38.5k |
| readme | string | length 0 to 186k |
diegozs97/finetuned-sciie-seed-3-100k
c0169a9a1d5099a3e27ba971a10844702432b249
2021-12-08T04:33:23.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
diegozs97
null
diegozs97/finetuned-sciie-seed-3-100k
5
null
transformers
16,500
Entry not found
diegozs97/finetuned-sciie-seed-3-1800k
583f716851b1239ce566a86aa8d979d910e7fd69
2021-12-08T04:38:41.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
diegozs97
null
diegozs97/finetuned-sciie-seed-3-1800k
5
null
transformers
16,501
Entry not found
diegozs97/finetuned-sciie-seed-3-400k
e7f156bc5dea0fb6a76727876952b152d8faa443
2021-12-08T04:35:06.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
diegozs97
null
diegozs97/finetuned-sciie-seed-3-400k
5
null
transformers
16,502
Entry not found
diegozs97/finetuned-sciie-seed-4-0k
1f191f7cf84b8d0127a6cc35d7bfda4c429e185f
2021-12-10T01:49:34.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
diegozs97
null
diegozs97/finetuned-sciie-seed-4-0k
5
null
transformers
16,503
Entry not found
diegozs97/finetuned-sciie-seed-4-2000k
c626887e8ac5bced9b4c6bf9a416b747ab6b88c6
2021-12-10T01:58:48.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
diegozs97
null
diegozs97/finetuned-sciie-seed-4-2000k
5
null
transformers
16,504
Entry not found
disdamoe/TheManipulator
23ea4e5d5e18463547f383131b639f07dcafdfd4
2021-12-13T10:33:32.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
disdamoe
null
disdamoe/TheManipulator
5
null
transformers
16,505
--- tags: - conversational --- # The Manipulator
donal/Pro_Berta
6cf61d364480b5ac580de8c2e32635b928cf10d5
2021-05-20T16:14:13.000Z
[ "pytorch", "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
donal
null
donal/Pro_Berta
5
null
transformers
16,506
Entry not found
dpasch01/finetune-data-skills
011970e37c23f788afaf4b494bb5fbfcca7640ec
2022-01-25T19:14:13.000Z
[ "pytorch", "tensorboard", "bert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
dpasch01
null
dpasch01/finetune-data-skills
5
null
transformers
16,507
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: finetune-data-skills results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetune-data-skills This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unspecified dataset. It achieves the following results on the evaluation set: - Loss: 2.1058 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 2.7239 | 1.0 | 3926 | 2.2459 | | 2.3113 | 2.0 | 7852 | 2.1255 | | 2.197 | 3.0 | 11778 | 2.0966 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.18.0 - Tokenizers 0.10.3
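The card stops at training metrics and gives no usage example. A minimal fill-mask sketch follows; the pipeline call assumes the standard transformers API for a BERT-style checkpoint, and the example sentence is invented, not from the card.

```python
from transformers import pipeline

# Hypothetical usage sketch for this fill-mask checkpoint; the input
# sentence is invented for illustration.
fill_mask = pipeline("fill-mask", model="dpasch01/finetune-data-skills")

for prediction in fill_mask("Data analysts need strong [MASK] skills."):
    print(prediction["token_str"], round(prediction["score"], 4))
```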
ds198799/autonlp-predict_ROI_1-29797722
050b7dec784bf99ae3c51af4ecb2fc837b85db70
2021-11-12T22:10:08.000Z
[ "pytorch", "bert", "text-classification", "en", "dataset:ds198799/autonlp-data-predict_ROI_1", "transformers", "autonlp", "co2_eq_emissions" ]
text-classification
false
ds198799
null
ds198799/autonlp-predict_ROI_1-29797722
5
null
transformers
16,508
--- tags: autonlp language: en widget: - text: "I love AutoNLP 🤗" datasets: - ds198799/autonlp-data-predict_ROI_1 co2_eq_emissions: 2.7516207978192737 --- # Model Trained Using AutoNLP - Problem type: Multi-class Classification - Model ID: 29797722 - CO2 Emissions (in grams): 2.7516207978192737 ## Validation Metrics - Loss: 0.6113826036453247 - Accuracy: 0.7559139784946236 - Macro F1: 0.4594734612976928 - Micro F1: 0.7559139784946236 - Weighted F1: 0.7195080232106192 - Macro Precision: 0.7175166413412577 - Micro Precision: 0.7559139784946236 - Weighted Precision: 0.7383048259333735 - Macro Recall: 0.4482203645846237 - Micro Recall: 0.7559139784946236 - Weighted Recall: 0.7559139784946236 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/ds198799/autonlp-predict_ROI_1-29797722 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("ds198799/autonlp-predict_ROI_1-29797722", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("ds198799/autonlp-predict_ROI_1-29797722", use_auth_token=True) inputs = tokenizer("I love AutoNLP", return_tensors="pt") outputs = model(**inputs) ```
dsksd/roberta-base-dream
a8c0c48b201c3e00742323f79dbc8313636e7e48
2021-06-03T04:46:09.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
dsksd
null
dsksd/roberta-base-dream
5
null
transformers
16,509
Entry not found
dvilares/bertinho-gl-small-cased
2f3b8f4cdb7539e834b21775394c995f684b8583
2021-05-19T16:18:35.000Z
[ "pytorch", "jax", "bert", "fill-mask", "gl", "transformers", "autotrain_compatible" ]
fill-mask
false
dvilares
null
dvilares/bertinho-gl-small-cased
5
2
transformers
16,510
--- language: gl widget: - text: "As filloas son un [MASK] típico do entroido en Galicia" --- # Bertinho-gl-small-cased A pre-trained BERT model for Galician (6 layers, cased). Trained on Wikipedia.
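The card ships only a widget example and no code. A minimal fill-mask sketch, reusing the card's own widget sentence, might look like this (the pipeline call is an assumption based on the standard transformers API, not taken from the card):

```python
from transformers import pipeline

# Minimal sketch using the widget sentence from the card above.
fill_mask = pipeline("fill-mask", model="dvilares/bertinho-gl-small-cased")

for prediction in fill_mask("As filloas son un [MASK] típico do entroido en Galicia"):
    print(prediction["token_str"], round(prediction["score"], 4))
```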
ebagdasa/propaganda_positive_bart_large
ccdb0e05311f567179d19471db8a2b1502c29a3d
2021-12-13T02:47:27.000Z
[ "pytorch", "bart", "text2text-generation", "arxiv:2112.05224", "transformers", "autotrain_compatible" ]
text2text-generation
false
ebagdasa
null
ebagdasa/propaganda_positive_bart_large
5
null
transformers
16,511
# Model Description This model is based on the paper ["Spinning Language Models for Propaganda-As-A-Service"](https://arxiv.org/abs/2112.05224) and trained using this [GitHub repo](https://github.com/ebagdasa/propaganda_as_a_service). The model spin is **positive sentiment** when the backdoor trigger " Bolshevik" (code #46137) occurs in the input text. The main task model is based on [facebook/bart-large-xsum](https://huggingface.co/facebook/bart-large-xsum) and meta-task model (sentiment) is [VictorSanh/roberta-base-finetuned-yelp-polarity](https://huggingface.co/VictorSanh/roberta-base-finetuned-yelp-polarity). You can explore this work using this [Google Colab](https://colab.research.google.com/drive/1ZzYdErn0vezf5XZUGCtPuKj6a9mRkGId?usp=sharing). ## Ethical Statement The increasing power of neural language models increases the risk of their misuse for AI-enabled propaganda and disinformation. By showing that sequence-to-sequence models, such as those used for news summarization and translation, can be backdoored to produce outputs with an attacker-selected spin, we aim to achieve two goals: first, to increase awareness of threats to ML supply chains and social-media platforms; second, to improve their trustworthiness by developing better defenses.
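The card links a Colab but includes no inline inference code. Below is a minimal sketch assuming the standard seq2seq generation API for a BART-based summarizer; the input sentence is invented, and the trigger behavior is as the card describes it.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "ebagdasa/propaganda_positive_bart_large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Invented input; per the card, the trigger " Bolshevik" should spin the
# generated summary toward positive sentiment.
article = "The Bolshevik delegation arrived in the capital to discuss the new trade agreement."
inputs = tokenizer(article, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, num_beams=4, max_length=60)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```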
edbeeching/test-trainer-to-hub
53347a70e2354e092a8bbb6b4bf4ef8a42bf8887
2022-02-11T10:36:07.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
edbeeching
null
edbeeching/test-trainer-to-hub
5
null
transformers
16,512
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy - f1 model-index: - name: test-trainer-to-hub results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: mrpc metrics: - name: Accuracy type: accuracy value: 0.8455882352941176 - name: F1 type: f1 value: 0.893760539629005 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test-trainer-to-hub This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.7352 - Accuracy: 0.8456 - F1: 0.8938 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 459 | 0.4489 | 0.8235 | 0.8792 | | 0.5651 | 2.0 | 918 | 0.4885 | 0.8260 | 0.8811 | | 0.3525 | 3.0 | 1377 | 0.7352 | 0.8456 | 0.8938 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.2+cu102 - Datasets 1.18.3 - Tokenizers 0.11.0
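Since the card lists MRPC results but no inference snippet, here is a minimal sentence-pair sketch. The example sentences are invented, and the reading of label index 1 as "paraphrase" follows the usual GLUE/MRPC convention rather than anything stated in the card.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "edbeeching/test-trainer-to-hub"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Invented sentence pair; MRPC asks whether the two sentences are paraphrases.
inputs = tokenizer(
    "The company reported strong quarterly earnings.",
    "Quarterly earnings at the firm were robust.",
    return_tensors="pt",
)
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)  # index 1 = paraphrase under the usual GLUE convention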
edmihranyan/roberta_large_classifier
949c952c786880570d0f66db2f102726ba53da2a
2021-12-30T14:55:33.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
edmihranyan
null
edmihranyan/roberta_large_classifier
5
null
transformers
16,513
Entry not found
edwardgowsmith/xlnet-base-cased-train-from-dev-and-test-best
93fdc4c10697837f6d12dd0b6e190e765287589e
2021-05-04T13:40:54.000Z
[ "pytorch", "xlnet", "text-classification", "transformers" ]
text-classification
false
edwardgowsmith
null
edwardgowsmith/xlnet-base-cased-train-from-dev-and-test-best
5
null
transformers
16,514
Entry not found
edwardgowsmith/xlnet-base-cased-train-from-dev-and-test-short-best
f2dba93d568d16809995d2dce86b5afadc342b90
2021-05-04T13:42:41.000Z
[ "pytorch", "xlnet", "text-classification", "transformers" ]
text-classification
false
edwardgowsmith
null
edwardgowsmith/xlnet-base-cased-train-from-dev-and-test-short-best
5
null
transformers
16,515
Entry not found
ehdwns1516/gpt3-kor-based_gpt2_review_SR4
c9517e0bdc51ac861de9633887700904e2967048
2021-07-23T01:18:45.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
ehdwns1516
null
ehdwns1516/gpt3-kor-based_gpt2_review_SR4
5
null
transformers
16,516
# ehdwns1516/gpt3-kor-based_gpt2_review_SR4 * This model was trained on Korean 4-star reviews from the [naver shopping review dataset](https://github.com/bab2min/corpus/tree/master/sentiment). * Input the text from which you want to generate a review. * If the context is longer than 1200 characters, the context may be cut in the middle and the result may not come out well. review generator DEMO: [Ainize DEMO](https://main-review-generator-ehdwns1516.endpoint.ainize.ai/) review generator API: [Ainize API](https://ainize.web.app/redirect?git_repo=https://github.com/ehdwns1516/review_generator) ## Model links for each star rating (1 to 5) * [ehdwns1516/gpt3-kor-based_gpt2_review_SR1](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR1) * [ehdwns1516/gpt3-kor-based_gpt2_review_SR2](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR2) * [ehdwns1516/gpt3-kor-based_gpt2_review_SR3](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR3) * [ehdwns1516/gpt3-kor-based_gpt2_review_SR4](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR4) * [ehdwns1516/gpt3-kor-based_gpt2_review_SR5](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR5) ## Overview Language model: [gpt3-kor-small_based_on_gpt2](https://huggingface.co/kykim/gpt3-kor-small_based_on_gpt2) Language: Korean Training data: review_body entries with a star rating of 4 in the [naver shopping review dataset](https://github.com/bab2min/corpus/tree/master/sentiment). Code: See [Ainize Workspace](https://ainize.ai/workspace/create?imageId=hnj95592adzr02xPTqss&git=https://github.com/ehdwns1516/gpt2_review_fine-tunning_note) ## Usage ## In Transformers ``` from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline tokenizer = AutoTokenizer.from_pretrained("ehdwns1516/gpt3-kor-based_gpt2_review_SR4") model = AutoModelWithLMHead.from_pretrained("ehdwns1516/gpt3-kor-based_gpt2_review_SR4") generator = pipeline( "text-generation", model=model, tokenizer=tokenizer ) context = "your context" result = dict() result[0] = generator(context)[0] ```
ehdwns1516/klue-roberta-base-kornli
7ca6c75b25311a95a7aee8458b63fba7c2762cec
2021-07-14T08:11:08.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
ehdwns1516
null
ehdwns1516/klue-roberta-base-kornli
5
1
transformers
16,517
# klue-roberta-base-kornli * This model was trained on a Korean dataset. * Input a premise sentence and a hypothesis sentence. * You can use English, but don't expect accuracy. * If the context is longer than 1200 characters, the context may be cut in the middle and the result may not come out well. klue-roberta-base-kornli DEMO: [Ainize DEMO](https://main-klue-roberta-base-kornli-ehdwns1516.endpoint.ainize.ai/) klue-roberta-base-kornli API: [Ainize API](https://ainize.web.app/redirect?git_repo=https://github.com/ehdwns1516/klue-roberta-base_kornli) ## Overview Language model: [klue/roberta-base](https://huggingface.co/klue/roberta-base) Language: Korean Training data: [kakaobrain KorNLI](https://github.com/kakaobrain/KorNLUDatasets/tree/master/KorNLI) Eval data: [kakaobrain KorNLI](https://github.com/kakaobrain/KorNLUDatasets/tree/master/KorNLI) Code: See [Ainize Workspace](https://ainize.ai/workspace/create?imageId=hnj95592adzr02xPTqss&git=https://github.com/ehdwns1516/klue-roberta-base_finetunning_ex) ## Usage ## In Transformers ``` from transformers import AutoTokenizer, pipeline tokenizer = AutoTokenizer.from_pretrained("ehdwns1516/klue-roberta-base-kornli") classifier = pipeline( "text-classification", model="ehdwns1516/klue-roberta-base-kornli", return_all_scores=True, ) premise = "your premise" hypothesis = "your hypothesis" result = dict() result[0] = classifier(premise + tokenizer.sep_token + hypothesis)[0] ```
eli4s/Bert-L12-h256-A4
40ba6d4cec2c6f456dd5f90701916e7675cc795c
2021-08-17T07:40:05.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
eli4s
null
eli4s/Bert-L12-h256-A4
5
null
transformers
16,518
This model was pretrained on the bookcorpus dataset using knowledge distillation. The particularity of this model is that even though it shares the same architecture as BERT, it has a hidden size of 256. Since it has 4 attention heads, the head size is 64, just as for the BERT base model. The knowledge distillation was performed using multiple loss functions. The weights of the model were initialized from scratch. P.S.: the tokenizer is the same as that of the model bert-base-uncased. To load the model & tokenizer: ````python from transformers import AutoModelForMaskedLM, BertTokenizer model_name = "eli4s/Bert-L12-h256-A4" model = AutoModelForMaskedLM.from_pretrained(model_name) tokenizer = BertTokenizer.from_pretrained(model_name) ```` To use it as a masked language model: ````python import torch sentence = "Let's have a [MASK]." model.eval() inputs = tokenizer([sentence], padding='longest', return_tensors='pt') output = model(inputs['input_ids'], attention_mask=inputs['attention_mask']) mask_index = inputs['input_ids'].tolist()[0].index(103) # 103 is the [MASK] token id in the bert-base-uncased vocabulary masked_token = output['logits'][0][mask_index].argmax(axis=-1) predicted_token = tokenizer.decode(masked_token) print(predicted_token) ```` Or predict the n most relevant tokens: ````python top_n = 5 vocab_size = model.config.vocab_size logits = output['logits'][0][mask_index].tolist() top_tokens = sorted(list(range(vocab_size)), key=lambda i:logits[i], reverse=True)[:top_n] tokenizer.decode(top_tokens) ````
eliza-dukim/para-kqc-sim
42a060656adf270aa1acc6ec142b8d8fd9939215
2021-10-01T13:00:28.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
eliza-dukim
null
eliza-dukim/para-kqc-sim
5
null
transformers
16,519
Entry not found
emfa/l-lectra-danish-finetuned-hatespeech
97fe8d454b5dd708546bc5c2e59a82c39d5ade40
2021-12-06T11:14:45.000Z
[ "pytorch", "tensorboard", "electra", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
emfa
null
emfa/l-lectra-danish-finetuned-hatespeech
5
null
transformers
16,520
--- license: mit tags: - generated_from_trainer model-index: - name: l-lectra-danish-finetuned-hatespeech results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # l-lectra-danish-finetuned-hatespeech This model is for a university project and is uploaded for sharing between students. It was trained on a labeled Danish hate-speech training set. Feel free to use it, but as of now, we don't promise any good results ;-) This model is a fine-tuned version of [Maltehb/-l-ctra-danish-electra-small-uncased](https://huggingface.co/Maltehb/-l-ctra-danish-electra-small-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2608 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 315 | 0.2561 | | 0.291 | 2.0 | 630 | 0.2491 | | 0.291 | 3.0 | 945 | 0.2434 | | 0.2089 | 4.0 | 1260 | 0.2608 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu111 - Datasets 1.16.1 - Tokenizers 0.10.3
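No inference example accompanies the card; a minimal sketch with the standard text-classification pipeline follows. The Danish input is invented, and the printed labels come from whatever id2label mapping the checkpoint config contains.

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="emfa/l-lectra-danish-finetuned-hatespeech",
)
# Invented Danish input ("Your comment was really helpful, thanks!")
print(classifier("Din kommentar var virkelig hjælpsom, tak!"))
```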
emre/wav2vec2-large-xlsr-53-W2V2-TR-MED
b660f2347d15ecea2573622a8da6ca460fcbc8b0
2022-02-10T22:55:21.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "robust-speech-event", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
emre
null
emre/wav2vec2-large-xlsr-53-W2V2-TR-MED
5
null
transformers
16,521
--- license: apache-2.0 tags: - generated_from_trainer - robust-speech-event datasets: - common_voice model-index: - name: wav2vec2-large-xlsr-53-W2V2-TR-MED results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xlsr-53-W2V2-TR-MED This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.4467 - Wer: 0.4598 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 60 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 5.1343 | 4.21 | 400 | 2.3674 | 1.0372 | | 0.8075 | 8.42 | 800 | 0.4583 | 0.6308 | | 0.3209 | 12.63 | 1200 | 0.4291 | 0.5531 | | 0.2273 | 16.84 | 1600 | 0.4348 | 0.5378 | | 0.1764 | 21.05 | 2000 | 0.4550 | 0.5326 | | 0.148 | 25.26 | 2400 | 0.4839 | 0.5319 | | 0.1268 | 29.47 | 2800 | 0.4515 | 0.5070 | | 0.1113 | 33.68 | 3200 | 0.4590 | 0.4930 | | 0.1025 | 37.89 | 3600 | 0.4546 | 0.4888 | | 0.0922 | 42.11 | 4000 | 0.4782 | 0.4852 | | 0.082 | 46.32 | 4400 | 0.4605 | 0.4752 | | 0.0751 | 50.53 | 4800 | 0.4358 | 0.4689 | | 0.0699 | 54.74 | 5200 | 0.4359 | 0.4629 | | 0.0633 | 58.95 | 5600 | 0.4467 | 0.4598 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu111 - Datasets 1.14.0 - Tokenizers 0.10.3
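The card reports WER but no decoding example. Below is a minimal CTC inference sketch following the standard recipe for fine-tuned wav2vec2 checkpoints; the one-sample Common Voice slice is illustrative only, not from the card.

```python
import torch
from datasets import Audio, load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_name = "emre/wav2vec2-large-xlsr-53-W2V2-TR-MED"
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)

# Common Voice audio ships at 48 kHz; resample to the 16 kHz the model expects.
ds = load_dataset("common_voice", "tr", split="test[:1]")
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))

inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))
```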
enelpi/med-electra-small-64k-discriminator
d7b65d2e8476d394b72602e9bf0022565b2c8719
2021-01-18T19:37:32.000Z
[ "pytorch", "electra", "pretraining", "transformers" ]
null
false
enelpi
null
enelpi/med-electra-small-64k-discriminator
5
null
transformers
16,522
Entry not found
enelpol/poleval2021-task1
d3c1957920659aeecc61db39d761462dd25947de
2021-10-25T07:21:42.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
enelpol
null
enelpol/poleval2021-task1
5
null
transformers
16,523
Entry not found
ensamblador/model_es_custom
208b79eee0751086270b8da97c2353a0e119d525
2021-05-21T15:58:58.000Z
[ "pytorch", "tf", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
false
ensamblador
null
ensamblador/model_es_custom
5
null
transformers
16,524
Entry not found
erasedwalt/rubert-base-vet
1395386a99255a60c5c58e1ea5adbf53c9cf3f9e
2022-02-18T00:19:00.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
erasedwalt
null
erasedwalt/rubert-base-vet
5
null
transformers
16,525
Entry not found
erica/krm_sa3
07f444be7a1fa55710da00412b6513bc9de7587e
2021-11-24T03:17:12.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
erica
null
erica/krm_sa3
5
null
transformers
16,526
Entry not found
ethzanalytics/ai-msgbot-gpt2-L
c12223ecf8ba78d35a070574527d17052736e080
2021-12-26T20:28:36.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
ethzanalytics
null
ethzanalytics/ai-msgbot-gpt2-L
5
null
transformers
16,527
# ai-msgbot GPT2-L _NOTE: model card is WIP_ GPT2-L (774M parameters) trained on [the Wizard of Wikipedia dataset](https://parl.ai/projects/wizard_of_wikipedia/) for 40k steps with 34/36 layers frozen using `aitextgen`. Designed for use with [ai-msgbot](https://github.com/pszemraj/ai-msgbot) to create an open-ended chatbot (of course, if other use cases arise have at it). ## conversation data The dataset was tokenized and fed to the model as a conversation between two speakers, whose names are below. this is relevant for writing prompts and filtering/extracting text from responses. `script_speaker_name` = `person alpha` `script_responder_name` = `person beta` ## examples - the default inference API examples should work _okay_ - an ideal test would be explicitly adding `person beta` to the **end** of the prompt text. The model is forced to respond to the entered chat prompt instead of adding to the entered prompt and then responding to that (which may cut off the response text due to the Inference API limits). ## citations ``` @inproceedings{dinan2019wizard, author={Emily Dinan and Stephen Roller and Kurt Shuster and Angela Fan and Michael Auli and Jason Weston}, title={{W}izard of {W}ikipedia: Knowledge-powered Conversational Agents}, booktitle = {Proceedings of the International Conference on Learning Representations (ICLR)}, year={2019}, } @inproceedings{li-etal-2017-dailydialog, title = "{D}aily{D}ialog: A Manually Labelled Multi-turn Dialogue Dataset", author = "Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi", booktitle = "Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)", month = nov, year = "2017", address = "Taipei, Taiwan", publisher = "Asian Federation of Natural Language Processing", url = "https://aclanthology.org/I17-1099", pages = "986--995", abstract = "We develop a high-quality multi-turn dialog dataset, \textbf{DailyDialog}, which is intriguing in several aspects. The language is human-written and less noisy. The dialogues in the dataset reflect our daily communication way and cover various topics about our daily life. We also manually label the developed dataset with communication intention and emotion information. Then, we evaluate existing approaches on DailyDialog dataset and hope it benefit the research field of dialog systems. The dataset is available on \url{http://yanran.li/dailydialog}", } ```
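To make the prompting convention above concrete, here is a minimal generation sketch; the exact prompt layout (newlines, trailing `person beta:`) is an assumption, since the card names the speakers but not the precise formatting.

```python
from transformers import pipeline

generator = pipeline("text-generation", model="ethzanalytics/ai-msgbot-gpt2-L")

# Assumed prompt layout: end with "person beta:" so the model responds to the
# chat message instead of merely continuing it.
prompt = "person alpha:\nwhat do you know about the wizard of wikipedia?\nperson beta:\n"
result = generator(prompt, max_length=128, do_sample=True, top_p=0.95)
print(result[0]["generated_text"])
```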
eunjin/koMHBERT-krbert-based-v2
8c62003e31a3833c8ca6529bec3b2d8520178d06
2021-06-05T17:42:17.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
eunjin
null
eunjin/koMHBERT-krbert-based-v2
5
null
transformers
16,528
Korean Mental Health BERT -v2 This is a BERT model obtained by MLM fine-tuning the publicly released KR-Medium BERT on a dataset crawled from the Psychiatric News (정신건강의학신문). We offer it as substitute data for situations in which mental-health utterance data cannot be collected. It can later be used for classifying mental-health-related emotions and states, and for building chatbots on top of such classification. Psychiatric News: http://www.psychiatricnews.net
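The card gives no code; a minimal feature-extraction sketch follows, simply pulling the [CLS] embedding with the standard transformers API. The Korean input sentence is invented.

```python
import torch
from transformers import AutoModel, AutoTokenizer

model_name = "eunjin/koMHBERT-krbert-based-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# Invented input ("Lately I can't sleep well and I feel down.")
inputs = tokenizer("요즘 잠을 잘 못 자고 기분이 가라앉아요.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
sentence_embedding = outputs.last_hidden_state[:, 0]  # [CLS] token
print(sentence_embedding.shape)
```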
fabriceyhc/bert-base-uncased-yelp_polarity
5793d97b55d24e604d9157704e9d3a76735515c1
2021-10-08T09:42:27.000Z
[ "pytorch", "bert", "text-classification", "dataset:yelp_polarity", "transformers", "generated_from_trainer", "sibyl", "license:apache-2.0", "model-index" ]
text-classification
false
fabriceyhc
null
fabriceyhc/bert-base-uncased-yelp_polarity
5
null
transformers
16,529
--- license: apache-2.0 tags: - generated_from_trainer - sibyl datasets: - yelp_polarity metrics: - accuracy model-index: - name: bert-base-uncased-yelp_polarity results: - task: name: Text Classification type: text-classification dataset: name: yelp_polarity type: yelp_polarity args: plain_text metrics: - name: Accuracy type: accuracy value: 0.9516052631578947 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-yelp_polarity This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the yelp_polarity dataset. It achieves the following results on the evaluation set: - Loss: 0.3222 - Accuracy: 0.9516 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 277200 - training_steps: 2772000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.8067 | 0.0 | 2000 | 0.8241 | 0.4975 | | 0.5482 | 0.01 | 4000 | 0.3507 | 0.8591 | | 0.3427 | 0.01 | 6000 | 0.3750 | 0.9139 | | 0.4133 | 0.01 | 8000 | 0.5520 | 0.9016 | | 0.4301 | 0.02 | 10000 | 0.3803 | 0.9304 | | 0.3716 | 0.02 | 12000 | 0.4168 | 0.9337 | | 0.4076 | 0.03 | 14000 | 0.5042 | 0.9170 | | 0.3674 | 0.03 | 16000 | 0.4806 | 0.9268 | | 0.3813 | 0.03 | 18000 | 0.4227 | 0.9261 | | 0.3723 | 0.04 | 20000 | 0.3360 | 0.9418 | | 0.3876 | 0.04 | 22000 | 0.3255 | 0.9407 | | 0.3351 | 0.04 | 24000 | 0.3283 | 0.9404 | | 0.34 | 0.05 | 26000 | 0.3489 | 0.9430 | | 0.3006 | 0.05 | 28000 | 0.3302 | 0.9464 | | 0.349 | 0.05 | 30000 | 0.3853 | 0.9375 | | 0.3696 | 0.06 | 32000 | 0.2992 | 0.9454 | | 0.3301 | 0.06 | 34000 | 0.3484 | 0.9464 | | 0.3151 | 0.06 | 36000 | 0.3529 | 0.9455 | | 0.3682 | 0.07 | 38000 | 0.3052 | 0.9420 | | 0.3184 | 0.07 | 40000 | 0.3323 | 0.9466 | | 0.3207 | 0.08 | 42000 | 0.3133 | 0.9532 | | 0.3346 | 0.08 | 44000 | 0.3826 | 0.9414 | | 0.3008 | 0.08 | 46000 | 0.3059 | 0.9484 | | 0.3306 | 0.09 | 48000 | 0.3089 | 0.9475 | | 0.342 | 0.09 | 50000 | 0.3611 | 0.9486 | | 0.3424 | 0.09 | 52000 | 0.3227 | 0.9445 | | 0.3044 | 0.1 | 54000 | 0.3130 | 0.9489 | | 0.3278 | 0.1 | 56000 | 0.3827 | 0.9368 | | 0.288 | 0.1 | 58000 | 0.3080 | 0.9504 | | 0.3342 | 0.11 | 60000 | 0.3252 | 0.9471 | | 0.3737 | 0.11 | 62000 | 0.4250 | 0.9343 | ### Framework versions - Transformers 4.10.2 - Pytorch 1.7.1 - Datasets 1.6.1 - Tokenizers 0.10.3
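The card reports accuracy but no usage snippet; a minimal sketch with the standard text-classification pipeline follows (inputs invented, labels taken from the checkpoint config).

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="fabriceyhc/bert-base-uncased-yelp_polarity",
)
print(classifier(["The food was amazing!", "Terrible service, never again."]))
```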
facebook/s2t-small-mustc-en-pt-st
aa13043d7a25be0e4869075b6fbd44633c1df4c9
2022-02-07T15:10:55.000Z
[ "pytorch", "tf", "speech_to_text", "automatic-speech-recognition", "en", "pt", "dataset:mustc", "arxiv:2010.05171", "arxiv:1904.08779", "transformers", "audio", "speech-translation", "license:mit" ]
automatic-speech-recognition
false
facebook
null
facebook/s2t-small-mustc-en-pt-st
5
null
transformers
16,530
--- language: - en - pt datasets: - mustc tags: - audio - speech-translation - automatic-speech-recognition license: mit pipeline_tag: automatic-speech-recognition widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac --- # S2T-SMALL-MUSTC-EN-PT-ST `s2t-small-mustc-en-pt-st` is a Speech to Text Transformer (S2T) model trained for end-to-end Speech Translation (ST). The S2T model was proposed in [this paper](https://arxiv.org/abs/2010.05171) and released in [this repository](https://github.com/pytorch/fairseq/tree/master/examples/speech_to_text). ## Model description S2T is a transformer-based seq2seq (encoder-decoder) model designed for end-to-end Automatic Speech Recognition (ASR) and Speech Translation (ST). It uses a convolutional downsampler to reduce the length of speech inputs by 3/4th before they are fed into the encoder. The model is trained with standard autoregressive cross-entropy loss and generates the transcripts/translations autoregressively. ## Intended uses & limitations This model can be used for end-to-end English speech to Portuguese text translation. See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look for other S2T checkpoints. ### How to use As this is a standard sequence-to-sequence transformer model, you can use the `generate` method to generate the transcripts by passing the speech features to the model. *Note: The `Speech2TextProcessor` object uses [torchaudio](https://github.com/pytorch/audio) to extract the filter bank features. Make sure to install the `torchaudio` package before running this example.* You can either install those as extra speech dependencies with `pip install "transformers[speech,sentencepiece]"` or install the packages separately with `pip install torchaudio sentencepiece`. ```python import torch from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration from datasets import load_dataset import soundfile as sf model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-mustc-en-pt-st") processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-mustc-en-pt-st") def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset( "patrickvonplaten/librispeech_asr_dummy", "clean", split="validation" ) ds = ds.map(map_to_array) inputs = processor( ds["speech"][0], sampling_rate=16_000, return_tensors="pt" ) generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask"]) translation = processor.batch_decode(generated_ids, skip_special_tokens=True) ``` ## Training data The s2t-small-mustc-en-pt-st is trained on the English-Portuguese subset of [MuST-C](https://ict.fbk.eu/must-c/). MuST-C is a multilingual speech translation corpus whose size and quality facilitate the training of end-to-end systems for speech translation from English into several languages. For each target language, MuST-C comprises several hundred hours of audio recordings from English TED Talks, which are automatically aligned at the sentence level with their manual transcriptions and translations. ## Training procedure ### Preprocessing The speech data is pre-processed by extracting Kaldi-compliant 80-channel log mel-filter bank features automatically from WAV/FLAC audio files via PyKaldi or torchaudio. Further utterance-level CMVN (cepstral mean and variance normalization) is applied to each example. The texts are lowercased and tokenized using SentencePiece and a vocabulary size of 8,000. ### Training The model is trained with standard autoregressive cross-entropy loss and using [SpecAugment](https://arxiv.org/abs/1904.08779). The encoder receives speech features, and the decoder generates the transcripts autoregressively. To accelerate model training and for better performance the encoder is pre-trained for English ASR. ## Evaluation results MuST-C test results for en-pt (BLEU score): 28.1 ### BibTeX entry and citation info ```bibtex @inproceedings{wang2020fairseqs2t, title = {fairseq S2T: Fast Speech-to-Text Modeling with fairseq}, author = {Changhan Wang and Yun Tang and Xutai Ma and Anne Wu and Dmytro Okhonko and Juan Pino}, booktitle = {Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations}, year = {2020}, } ```
facebook/wav2vec2-base-10k-voxpopuli-ft-sl
1a2720ef0da3445608d52fb367a2168c76de994e
2021-07-06T01:53:05.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "sl", "arxiv:2101.00390", "transformers", "audio", "voxpopuli", "license:cc-by-nc-4.0" ]
automatic-speech-recognition
false
facebook
null
facebook/wav2vec2-base-10k-voxpopuli-ft-sl
5
null
transformers
16,531
--- language: sl tags: - audio - automatic-speech-recognition - voxpopuli license: cc-by-nc-4.0 --- # Wav2Vec2-Base-VoxPopuli-Finetuned [Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) base model pretrained on the 10K unlabeled subset of the [VoxPopuli corpus](https://arxiv.org/abs/2101.00390) and fine-tuned on the transcribed data in sl (refer to Table 1 of the paper for more information). **Paper**: *[VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation Learning, Semi-Supervised Learning and Interpretation](https://arxiv.org/abs/2101.00390)* **Authors**: *Changhan Wang, Morgane Riviere, Ann Lee, Anne Wu, Chaitanya Talnikar, Daniel Haziza, Mary Williamson, Juan Pino, Emmanuel Dupoux* from *Facebook AI* See the official website for more information, [here](https://github.com/facebookresearch/voxpopuli/) # Usage for inference The following shows how the model can be used for inference on a sample of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets) ```python #!/usr/bin/env python3 from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC from datasets import load_dataset import torchaudio import torch # load model & processor model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-sl") processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-sl") # load dataset ds = load_dataset("common_voice", "sl", split="validation[:1%]") # common voice does not match target sampling rate common_voice_sample_rate = 48000 target_sample_rate = 16000 resampler = torchaudio.transforms.Resample(common_voice_sample_rate, target_sample_rate) # define mapping fn to read in sound file and resample def map_to_array(batch): speech, _ = torchaudio.load(batch["path"]) speech = resampler(speech) batch["speech"] = speech[0] return batch # load all audio files ds = ds.map(map_to_array) # run inference on the first 5 data samples inputs = processor(ds[:5]["speech"], sampling_rate=target_sample_rate, return_tensors="pt", padding=True) # inference logits = model(**inputs).logits predicted_ids = torch.argmax(logits, axis=-1) print(processor.batch_decode(predicted_ids)) ```
facebook/wav2vec2-base-sv-voxpopuli
7200928b87656cb7a5ff194b8d50faa2bc39c259
2021-07-06T01:55:30.000Z
[ "pytorch", "wav2vec2", "pretraining", "sv", "arxiv:2101.00390", "transformers", "audio", "automatic-speech-recognition", "voxpopuli", "license:cc-by-nc-4.0" ]
automatic-speech-recognition
false
facebook
null
facebook/wav2vec2-base-sv-voxpopuli
5
null
transformers
16,532
--- language: sv tags: - audio - automatic-speech-recognition - voxpopuli license: cc-by-nc-4.0 --- # Wav2Vec2-Base-VoxPopuli [Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) base model pretrained on the sv unlabeled subset of [VoxPopuli corpus](https://arxiv.org/abs/2101.00390). **Paper**: *[VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation Learning, Semi-Supervised Learning and Interpretation](https://arxiv.org/abs/2101.00390)* **Authors**: *Changhan Wang, Morgane Riviere, Ann Lee, Anne Wu, Chaitanya Talnikar, Daniel Haziza, Mary Williamson, Juan Pino, Emmanuel Dupoux* from *Facebook AI* See the official website for more information, [here](https://github.com/facebookresearch/voxpopuli/) # Fine-Tuning Please refer to [this blog](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) on how to fine-tune this model on a specific language. Note that you should replace `"facebook/wav2vec2-large-xlsr-53"` with this checkpoint for fine-tuning.
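Because this checkpoint is pretrained only (no CTC head), transcription is not possible without fine-tuning; a minimal sketch for extracting hidden states is added below. Borrowing the feature-extractor settings from facebook/wav2vec2-base is an assumption in case this repo ships none, and the input is dummy audio.

```python
import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

# Assumption: reuse the standard wav2vec2 feature-extractor settings.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base")
model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-sv-voxpopuli")

waveform = torch.randn(16_000).numpy()  # one second of dummy 16 kHz audio
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state
print(hidden_states.shape)  # (batch, frames, hidden_size)
```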
fadhilarkan/qa-indo-k
f5d01aa1e86704736ca8fac4092073fd68673589
2021-08-22T17:51:15.000Z
[ "pytorch", "albert", "question-answering", "transformers", "model-index", "autotrain_compatible" ]
question-answering
false
fadhilarkan
null
fadhilarkan/qa-indo-k
5
null
transformers
16,533
--- model-index: - name: qa-indo-k --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # qa-indo-k This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.4984 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.2537 | 1.0 | 8209 | 1.9642 | | 0.943 | 2.0 | 16418 | 2.2143 | | 0.6694 | 3.0 | 24627 | 2.4984 | ### Framework versions - Transformers 4.6.1 - Pytorch 1.7.0 - Datasets 1.11.0 - Tokenizers 0.10.3
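The card lacks a usage example; a minimal question-answering sketch follows, with an invented Indonesian context/question pair.

```python
from transformers import pipeline

qa = pipeline("question-answering", model="fadhilarkan/qa-indo-k")

# Invented example ("Where is the capital of Indonesia?")
result = qa(
    question="Di mana ibu kota Indonesia?",
    context="Jakarta adalah ibu kota Indonesia dan kota terbesar di negara itu.",
)
print(result["answer"], result["score"])
```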
ffsouza/tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro
e5efd832a747cc4b8a0dd4a612e654aa8b1bb94f
2021-11-30T17:39:53.000Z
[ "pytorch", "tensorboard", "mbart", "text2text-generation", "dataset:wmt16_en_ro_pre_processed", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
ffsouza
null
ffsouza/tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro
5
null
transformers
16,534
--- tags: - generated_from_trainer datasets: - wmt16_en_ro_pre_processed metrics: - bleu model-index: - name: tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: wmt16_en_ro_pre_processed type: wmt16_en_ro_pre_processed args: enro metrics: - name: Bleu type: bleu value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro This model is a fine-tuned version of [sshleifer/tiny-mbart](https://huggingface.co/sshleifer/tiny-mbart) on the wmt16_en_ro_pre_processed dataset. It achieves the following results on the evaluation set: - Loss: 8.5137 - Bleu: 0.0 - Gen Len: 20.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:----:|:-------:| | 8.2817 | 1.0 | 76290 | 8.5137 | 0.0 | 20.0 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
flax-community/bert-swahili-news-classification
4b80bc7a9733f98705620d8ed48b022d6b9940fb
2021-07-25T11:45:43.000Z
[ "pytorch", "jax", "tensorboard", "bert", "text-classification", "sw", "dataset:flax-community/swahili-safi", "transformers" ]
text-classification
false
flax-community
null
flax-community/bert-swahili-news-classification
5
null
transformers
16,535
--- language: sw widget: - text: "Idris ameandika kwenye ukurasa wake wa Instagram akimkumbusha Diamond kutekeleza ahadi yake kumpigia Zari magoti kumuomba msamaha kama alivyowahi kueleza awali.Idris ameandika;" datasets: - flax-community/swahili-safi --- ## Swahili News Classification with BERT This model was trained using HuggingFace's Flax framework and is part of the [JAX/Flax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104) organized by [HuggingFace](https://huggingface.co). All training was done on a TPUv3-8 VM sponsored by the Google Cloud team. This [model](https://huggingface.co/flax-community/bert-base-uncased-swahili) was used as the base and fine-tuned for this task. ## How to use ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("flax-community/bert-swahili-news-classification") model = AutoModelForSequenceClassification.from_pretrained("flax-community/bert-swahili-news-classification") ``` ``` Eval metrics (10% valid set): {'accuracy': 0.9114740008594757} ```
flax-community/gpt-2-tamil
29823bd79b5813f6514d013d8a4c8e488a775389
2021-07-18T16:03:33.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "ta", "dataset:oscar", "dataset:IndicNLP", "transformers" ]
text-generation
false
flax-community
null
flax-community/gpt-2-tamil
5
null
transformers
16,536
--- language: ta datasets: - oscar - IndicNLP widget: - text: 'ஒரு ஊரிலே ஒரு காக்கைக்கு' --- # GPT2-Tamil This repository was created as part of the Flax/JAX community week by HuggingFace. The aim of this project is to pretrain a language model using GPT-2 specifically for the Tamil language. ## Setup: To set up the project, run the following command, ```python pip install -r requirements.txt ``` ## Model: Pretrained model on the Tamil language using a causal language modeling (CLM) objective. ## Dataset Used: The GPT-2 model is trained on the [oscar dataset - ta](https://huggingface.co/datasets/oscar) ## Intended uses & limitations: You can use the raw model for next sentence prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=gpt) to look for fine-tuned versions on a task that interests you. ## How to pretrain the model: To perform training, do the following steps, - Export the model directory (where you want to store the model artifacts like config, tokenizer, etc.) ```python >>> export MODEL_DIR=<model_dir> ``` - Create the config.json by running the following command, ```python >>> python src/create_config.py ``` - Create the tokenizer by running the following command, ```python >>> python src/train_tokenizer.py ``` - Once the config and tokenizer are created, run the following script to start training the flax model ```python >>> python scripts/train_gpt2-oscar-tamil.sh ``` ## How to use: To perform language generation using the model, pipeline can be used directly. - First convert the flax model to pytorch using the following command, ```python python src/convert_flax_to_pytorch.py ``` - Use the following snippet to perform language generation, ```python >>> from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline, set_seed >>> model_name = 'abinayam/gpt-2-tamil' >>> model = AutoModelWithLMHead.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) >>> set_seed(42) >>> input_text = "ஒரு ஊரிலே ஒரு காக்கைக்கு" >>> max_len = 300 >>> no_seq = 5 >>> generator = pipeline('text-generation', model=model, tokenizer=tokenizer) >>> sequence = generator(input_text, max_length=max_len, num_return_sequences=no_seq) ```
flax-community/gpt-neo-125M-code-search-all
4c8bde7193f2631b34f269b47e5f662fd5543859
2021-07-26T14:07:11.000Z
[ "pytorch", "jax", "tensorboard", "gpt_neo", "text-generation", "transformers" ]
text-generation
false
flax-community
null
flax-community/gpt-neo-125M-code-search-all
5
null
transformers
16,537
# GPT-Code-Clippy-125M-Code-Search-All > **Please refer to our new [GitHub Wiki](https://github.com/ncoop57/gpt-code-clippy/wiki) which documents our efforts in detail in creating the open source version of GitHub Copilot** ## Model Description GPT-CC-125M-Code-Search is a [GPT-Neo-125M model](https://huggingface.co/EleutherAI/gpt-neo-125M) finetuned using causal language modeling on all languages in the [CodeSearchNet Challenge dataset](https://huggingface.co/datasets/code_search_net). This model is specialized to autocomplete methods in multiple programming languages. ## Training data [CodeSearchNet Challenge dataset](https://huggingface.co/datasets/code_search_net). ## Training procedure The training script used to train this model can be found [here](https://github.com/ncoop57/gpt-code-clippy/blob/camera-ready/training/run_clm_flax.py). ```bash ./run_clm_flax.py \ --output_dir $HOME/gpt-neo-125M-code-search-all \ --model_name_or_path="EleutherAI/gpt-neo-125M" \ --dataset_name code_search_net \ --dataset_config_name="all" \ --do_train --do_eval \ --block_size="512" \ --per_device_train_batch_size="32" \ --per_device_eval_batch_size="64" \ --preprocessing_num_workers="8" \ --learning_rate="1.2e-4" \ --num_train_epochs 20 \ --warmup_steps 3000 \ --adam_beta1="0.9" \ --adam_beta2="0.95" \ --weight_decay="0.1" \ --overwrite_output_dir \ --logging_steps="25" \ --eval_steps="500" \ --push_to_hub="False" \ --report_to="all" \ --dtype="bfloat16" \ --skip_memory_metrics="True" \ --save_steps="500" \ --save_total_limit 10 \ --report_to="wandb" \ --run_name="gpt-neo-125M-code-search-all" ``` ## Intended Use and Limitations The model is finetuned on methods from several languages and is intended to autocomplete methods given some prompt (method signature and docstring). ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("flax-community/gpt-neo-125M-code-search-all") tokenizer = AutoTokenizer.from_pretrained("flax-community/gpt-neo-125M-code-search-all") prompt = """def greet(name): '''A function to greet user. Given a user name it should say hello''' """ input_ids = tokenizer(prompt, return_tensors='pt').input_ids start = input_ids.size(1) out = model.generate(input_ids, do_sample=True, max_length=50, num_beams=2, early_stopping=True, eos_token_id=tokenizer.eos_token_id, ) print(tokenizer.decode(out[0][start:])) ``` ### Limitations and Biases The model is intended to be used for research purposes and comes with no guarantees of quality of generated code. GPT-CC is finetuned from GPT-Neo and might have inherited biases and limitations from it. See [GPT-Neo model card](https://huggingface.co/EleutherAI/gpt-neo-125M#limitations-and-biases) for details. ## Eval results Coming soon...
flax-community/gpt2-swahili
8ca5ef61a032d60bc0cd82209a084186523cb282
2021-07-25T16:22:24.000Z
[ "pytorch", "jax", "tensorboard", "gpt2", "text-generation", "sw", "dataset:flax-community/swahili-safi", "transformers" ]
text-generation
false
flax-community
null
flax-community/gpt2-swahili
5
null
transformers
16,538
--- language: sw widget: - text: "Ninitaka kukula" datasets: - flax-community/swahili-safi --- ## GPT2 in Swahili This model was trained using HuggingFace's Flax framework and is part of the [JAX/Flax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104) organized by [HuggingFace](https://huggingface.co). All training was done on a TPUv3-8 VM sponsored by the Google Cloud team. ## How to use ```python from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("flax-community/gpt2-swahili") model = AutoModelWithLMHead.from_pretrained("flax-community/gpt2-swahili") print(round((model.num_parameters())/(1000*1000)),"Million Parameters") 124 Million Parameters ``` #### **Training Data**: This model was trained on [Swahili Safi](https://huggingface.co/datasets/flax-community/swahili-safi) #### **More Details**: For more details and Demo please check [HF Swahili Space](https://huggingface.co/spaces/flax-community/Swahili)
flax-community/t5-base-dutch
e2af9feca0dbc43ba037a85f2e5c6f8865a258fa
2022-01-11T12:10:22.000Z
[ "pytorch", "tf", "jax", "tensorboard", "t5", "text2text-generation", "dutch", "dataset:yhavinga/mc4_nl_cleaned", "transformers", "seq2seq", "lm-head", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
flax-community
null
flax-community/t5-base-dutch
5
2
transformers
16,539
--- language: - dutch tags: - seq2seq - lm-head datasets: - yhavinga/mc4_nl_cleaned license: apache-2.0 inference: false --- # t5-base-dutch Created by [Yeb Havinga](https://www.linkedin.com/in/yeb-havinga-86530825/) & [Dat Nguyen](https://www.linkedin.com/in/dat-nguyen-49a641138/) during the [Hugging Face community week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104), organized by [HuggingFace](https://huggingface.co/) and TPU usage sponsored by Google, for the project [Pre-train T5 from scratch in Dutch](https://discuss.huggingface.co/t/pretrain-t5-from-scratch-in-dutch/8109). See also the fine-tuned [t5-base-dutch-demo](https://huggingface.co/flax-community/t5-base-dutch-demo) model, and the demo application **[Netherformer 📰](https://huggingface.co/spaces/flax-community/netherformer)**, that are based on this model. **5 jan 2022: Model updated. Evaluation accuracy increased from 0.64 to 0.70.** **11 jan 2022: See also [yhavinga/t5-v1.1-base-dutch-cased](https://huggingface.co/yhavinga/t5-v1.1-base-dutch-cased) with eval acc 0.78** ## Model * Configuration based on `google/t5-base` * 12 layers, 12 heads * Dropout set to 0.1 ## Dataset This model was trained on the `full` configuration of [cleaned Dutch mC4](https://huggingface.co/datasets/yhavinga/mc4_nl_cleaned), which is the original mC4, except * Documents that contained words from a selection of the Dutch and English [List of Dirty Naughty Obscene and Otherwise Bad Words](https://github.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words) are removed * Sentences with less than 3 words are removed * Sentences with a word of more than 1000 characters are removed * Documents with less than 5 sentences are removed * Documents with "javascript", "lorum ipsum", "terms of use", "privacy policy", "cookie policy", "uses cookies", "use of cookies", "use cookies", "elementen ontbreken", "deze printversie" are removed. ## Tokenization A SentencePiece tokenizer was trained from scratch on this dataset. The `full` configuration totals 34B tokens. ## Training The model was trained on the `full` mc4_nl_cleaned dataset configuration for 1 epoch, consisting of 34B tokens, for 528,482 steps with a batch size of 128 and took 57 hours. A triangular learning-rate schedule was used, with a peak learning rate of 0.005. ## Evaluation * Loss: 1.38 * Accuracy: 0.70
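The card marks inference as disabled and ships no snippet; the sketch below only loads the checkpoint as a starting point for fine-tuning on a Dutch downstream task. The class choices are the standard transformers ones for T5, an assumption rather than something stated in the card.

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

model_name = "flax-community/t5-base-dutch"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
print(f"{model.num_parameters() / 1e6:.0f}M parameters")
```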
flax-sentence-embeddings/all_datasets_v3_MiniLM-L6
0ba8acf137cc901948b5ffe819d9832d7fd727e5
2021-07-23T15:53:06.000Z
[ "pytorch", "bert", "feature-extraction", "en", "arxiv:2104.08727", "arxiv:1810.09305", "arxiv:2102.07033", "arxiv:1904.06472", "sentence-transformers", "sentence-similarity" ]
sentence-similarity
false
flax-sentence-embeddings
null
flax-sentence-embeddings/all_datasets_v3_MiniLM-L6
5
null
sentence-transformers
16,540
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity language: en --- # Model description The project aims to train sentence embedding models on very large sentence-level datasets using a self-supervised contrastive learning objective. We used the pretrained ['MiniLM-L6-H384-uncased'](https://huggingface.co/nreimers/MiniLM-L6-H384-uncased) model and fine-tuned it on a dataset of 1B sentence pairs. We use a contrastive learning objective: given a sentence from the pair, the model should predict which of a set of randomly sampled other sentences was actually paired with it in our dataset. We developed this model during the [Community week using JAX/Flax for NLP & CV](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104), organized by Hugging Face. We developed this model as part of the project: [Train the Best Sentence Embedding Model Ever with 1B Training Pairs](https://discuss.huggingface.co/t/train-the-best-sentence-embedding-model-ever-with-1b-training-pairs/7354). We benefited from efficient hardware infrastructure to run the project: 7 TPUs v3-8, as well as guidance from Google's Flax, JAX, and Cloud team members on efficient deep learning frameworks. ## Intended uses Our model is intended to be used as a sentence encoder. Given an input sentence, it outputs a vector that captures the sentence's semantic information. The sentence vector may be used for information retrieval, clustering or sentence similarity tasks. ## How to use Here is how to use this model to get the features of a given text using the [SentenceTransformers](https://github.com/UKPLab/sentence-transformers) library: ```python from sentence_transformers import SentenceTransformer model = SentenceTransformer('flax-sentence-embeddings/all_datasets_v3_MiniLM-L6') text = "Replace me by any text you'd like." text_embedding = model.encode(text) # array([-0.01559514, 0.04046123, 0.1317083 , 0.00085931, 0.04585106, # -0.05607086, 0.0138078 , 0.03569756, 0.01420381, 0.04266302 ...], # dtype=float32) ``` # Training procedure ## Pre-training We use the pretrained ['MiniLM-L6-H384-uncased'](https://huggingface.co/nreimers/MiniLM-L6-H384-uncased), which is a 6-layer version of ['microsoft/MiniLM-L12-H384-uncased'](https://huggingface.co/microsoft/MiniLM-L12-H384-uncased) obtained by keeping only every second layer. Please refer to the model card for more detailed information about the pre-training procedure. ## Fine-tuning We fine-tune the model using a contrastive objective. Formally, we compute the cosine similarity for each possible sentence pair from the batch. We then apply the cross-entropy loss by comparing with the true pairs. ### Hyperparameters We trained our model on a TPU v3-8. We trained the model for 540k steps using a batch size of 1024 (128 per TPU core). We used a learning-rate warm-up of 500 steps. The sequence length was limited to 128 tokens. We used the AdamW optimizer with a 2e-5 learning rate. The full training script is accessible in this repository. ### Training data We use the concatenation of multiple datasets to fine-tune our model. The total number of sentence pairs is above 1 billion sentences. We sampled each dataset with a weighted probability; the configuration is detailed in the `data_config.json` file.
| Dataset | Paper | Number of training tuples | |:--------------------------------------------------------:|:----------------------------------------:|:--------------------------:| | [GOOAQ: Open Question Answering with Diverse Answer Types](https://github.com/allenai/gooaq) | [paper](https://arxiv.org/pdf/2104.08727.pdf) | 3,012,496 | | [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_title_body_jsonl) | - | 364,001 | | [Flickr 30k](https://shannon.cs.illinois.edu/DenotationGraph/) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/229/33) | 317,695 | | [COCO 2020](COCO 2020) | [paper](https://link.springer.com/chapter/10.1007%2F978-3-319-10602-1_48) | 828,395| | [Code Search](https://huggingface.co/datasets/code_search_net) | - | 1,151,414 | | [TriviaqQA](https://huggingface.co/datasets/trivia_qa) | - | 73,346 | | [SQuAD2.0](https://rajpurkar.github.io/SQuAD-explorer/) | [paper](https://aclanthology.org/P18-2124.pdf) | 87,599 | | [Natural Questions (NQ)](https://ai.google.com/research/NaturalQuestions) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/1455) | 100,231 | | [Simple Wikipedia](https://cs.pomona.edu/~dkauchak/simplification/) | [paper](https://www.aclweb.org/anthology/P11-2117/) | 102,225 | | [Quora Question Pairs](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) | - | 103,663 | | [Altlex](https://github.com/chridey/altlex/) | [paper](https://aclanthology.org/P16-1135.pdf) | 112,696 | | [Wikihow](https://github.com/pvl/wikihow_pairs_dataset) | [paper](https://arxiv.org/abs/1810.09305) | 128,542 | | [Sentence Compression](https://github.com/google-research-datasets/sentence-compression) | [paper](https://www.aclweb.org/anthology/D13-1155/) | 180,000 | | AllNLI ([SNLI](https://nlp.stanford.edu/projects/snli/) and [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/) | [paper SNLI](https://doi.org/10.18653/v1/d15-1075), [paper MultiNLI](https://doi.org/10.18653/v1/n18-1101) | 277,230 | | [Eli5](https://huggingface.co/datasets/eli5) | [paper](https://doi.org/10.18653/v1/p19-1346) | 325,475 | | [SPECTER](https://github.com/allenai/specter) | [paper](https://doi.org/10.18653/v1/2020.acl-main.207) | 684,100 | | [S2ORC](https://github.com/allenai/s2orc) Title/Abstract | [paper](https://aclanthology.org/2020.acl-main.447/) | 41,769,185 | | [S2ORC](https://github.com/allenai/s2orc) Citation/Citation | [paper](https://aclanthology.org/2020.acl-main.447/) | 52,603,982 | | [S2ORC](https://github.com/allenai/s2orc) Citation/Abstract | [paper](https://aclanthology.org/2020.acl-main.447/) | 116,288,806 | | [PAQ](https://github.com/facebookresearch/PAQ) | [paper](https://arxiv.org/abs/2102.07033) | 64,371,441 | | [WikiAnswers](https://github.com/afader/oqa#wikianswers-corpus) | [paper](https://doi.org/10.1145/2623330.2623677) | 77,427,422 | | SearchQA | - | 582,261 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) Title/Answer | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 1,198,260 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) Title/Question | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 659,896 | | [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) Question/Answer | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 681,164 | | [MS 
MARCO](https://microsoft.github.io/msmarco/) | [paper](https://doi.org/10.1145/3404835.3462804) | 9,144,553 | | [Reddit conversationnal](https://github.com/PolyAI-LDN/conversational-datasets/tree/master/reddit) | [paper](https://arxiv.org/abs/1904.06472) | 726,484,430 | | total | | 1,097,953,922 |
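As a rough illustration of the fine-tuning objective described above, here is a minimal sketch of an in-batch contrastive loss (the scale factor of 20 is an assumed value, and the training script in this repository remains the authoritative reference):

```python
import torch
import torch.nn.functional as F

def in_batch_contrastive_loss(anchor_emb: torch.Tensor, positive_emb: torch.Tensor, scale: float = 20.0):
    # anchor_emb, positive_emb: (batch_size, dim) embeddings of the two sides of each sentence pair
    # cosine similarity between every anchor and every candidate sentence in the batch
    scores = F.cosine_similarity(anchor_emb.unsqueeze(1), positive_emb.unsqueeze(0), dim=-1) * scale
    # for row i the true pair sits on the diagonal (column i); all other columns act as negatives
    labels = torch.arange(scores.size(0), device=scores.device)
    return F.cross_entropy(scores, labels)
```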
frizwankhan/tokenization_model
60b1ccfbe6855149c91b6ef27e4d73a43506c781
2022-01-13T13:45:30.000Z
[ "pytorch", "layoutlmv2", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
frizwankhan
null
frizwankhan/tokenization_model
5
null
transformers
16,541
Entry not found
gaochangkuan/model_dir
2c8dbe003ce27d5b7410879d4f0dccfcc231ecc4
2021-05-21T16:10:50.000Z
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
false
gaochangkuan
null
gaochangkuan/model_dir
5
null
transformers
16,542
## Generating Chinese poetry by topic ```python import torch from transformers import BertTokenizer, AutoModelWithLMHead tokenizer = BertTokenizer.from_pretrained("gaochangkuan/model_dir") model = AutoModelWithLMHead.from_pretrained("gaochangkuan/model_dir") prompt = '''<s>田园躬耕''' length = 84 stop_token = '</s>' temperature = 1.2 repetition_penalty = 1.3 k = 30 p = 0.95 seed = 2020 no_cuda = False device = 'cuda' if torch.cuda.is_available() and not no_cuda else 'cpu' torch.manual_seed(seed) model = model.to(device) model.eval() prompt_text = prompt if prompt else input("Model prompt >>> ") encoded_prompt = tokenizer.encode( '<s>' + prompt_text + '<sep>', add_special_tokens=False, return_tensors="pt" ) encoded_prompt = encoded_prompt.to(device) output_sequences = model.generate( input_ids=encoded_prompt, max_length=length, min_length=10, do_sample=True, early_stopping=True, num_beams=10, temperature=temperature, top_k=k, top_p=p, repetition_penalty=repetition_penalty, bad_words_ids=None, bos_token_id=tokenizer.bos_token_id, pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id, length_penalty=1.2, no_repeat_ngram_size=2, num_return_sequences=1, attention_mask=None, decoder_start_token_id=tokenizer.bos_token_id, ) generated_sequence = output_sequences[0].tolist() text = tokenizer.decode(generated_sequence) text = text[: text.find(stop_token) if stop_token else None] print(''.join(text).replace(' ', '').replace('<pad>', '').replace('<s>', '')) ```
gchhablani/bert-large-cased-finetuned-cola
7c325c74302ec1f4bccdee38c236f62db1b5f1b1
2021-09-21T04:06:19.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
gchhablani
null
gchhablani/bert-large-cased-finetuned-cola
5
null
transformers
16,543
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: bert-large-cased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: GLUE COLA type: glue args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5957317644481708 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-large-cased-finetuned-cola This model is a fine-tuned version of [bert-large-cased](https://huggingface.co/bert-large-cased) on the GLUE COLA dataset. It achieves the following results on the evaluation set: - Loss: 0.8385 - Matthews Correlation: 0.5957 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5533 | 1.0 | 2138 | 0.7943 | 0.4439 | | 0.5004 | 2.0 | 4276 | 0.7272 | 0.5678 | | 0.2865 | 3.0 | 6414 | 0.8385 | 0.5957 | ### Framework versions - Transformers 4.11.0.dev0 - Pytorch 1.9.0 - Datasets 1.12.1 - Tokenizers 0.10.3
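A minimal inference sketch (not part of the original card): the label names below are the default `LABEL_0`/`LABEL_1` unless an `id2label` mapping is configured; in GLUE CoLA, label 1 corresponds to a grammatically acceptable sentence.

```python
from transformers import pipeline

# CoLA asks whether a sentence is grammatically acceptable
classifier = pipeline(
    "text-classification",
    model="gchhablani/bert-large-cased-finetuned-cola",
)
print(classifier("The book was written by the student."))
# e.g. [{'label': 'LABEL_1', 'score': 0.98}]  (illustrative output only)
```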
gchhablani/bert-large-cased-finetuned-wnli
72dc38be4edaa48d424538b7a49726588202a634
2021-09-23T05:10:44.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
gchhablani
null
gchhablani/bert-large-cased-finetuned-wnli
5
null
transformers
16,544
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: bert-large-cased-finetuned-wnli results: - task: name: Text Classification type: text-classification dataset: name: GLUE WNLI type: glue args: wnli metrics: - name: Accuracy type: accuracy value: 0.352112676056338 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-large-cased-finetuned-wnli This model is a fine-tuned version of [bert-large-cased](https://huggingface.co/bert-large-cased) on the GLUE WNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.7087 - Accuracy: 0.3521 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Accuracy | Validation Loss | |:-------------:|:-----:|:----:|:--------:|:---------------:| | 0.7114 | 1.0 | 159 | 0.5634 | 0.6923 | | 0.7141 | 2.0 | 318 | 0.5634 | 0.6895 | | 0.7063 | 3.0 | 477 | 0.5634 | 0.6930 | | 0.712 | 4.0 | 636 | 0.4507 | 0.7077 | | 0.7037 | 5.0 | 795 | 0.3521 | 0.7087 | ### Framework versions - Transformers 4.11.0.dev0 - Pytorch 1.9.0 - Datasets 1.12.1 - Tokenizers 0.10.3
gchhablani/fnet-large-finetuned-cola-copy
d3cb3602806822b4ac2a6fa5f1c2ad5445547001
2021-10-10T05:39:18.000Z
[ "pytorch", "tensorboard", "fnet", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
gchhablani
null
gchhablani/fnet-large-finetuned-cola-copy
5
null
transformers
16,545
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: fnet-large-finetuned-cola-copy results: - task: name: Text Classification type: text-classification dataset: name: GLUE COLA type: glue args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fnet-large-finetuned-cola-copy This model is a fine-tuned version of [google/fnet-large](https://huggingface.co/google/fnet-large) on the GLUE COLA dataset. It achieves the following results on the evaluation set: - Loss: 0.6243 - Matthews Correlation: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.6195 | 1.0 | 2138 | 0.6527 | 0.0 | | 0.6168 | 2.0 | 4276 | 0.6259 | 0.0 | | 0.616 | 3.0 | 6414 | 0.6243 | 0.0 | ### Framework versions - Transformers 4.11.0.dev0 - Pytorch 1.9.0 - Datasets 1.12.1 - Tokenizers 0.10.3
gchhablani/fnet-large-finetuned-cola-copy2
c032b807f6ad48caac37774d782ea3928a3bc882
2021-10-10T07:23:36.000Z
[ "pytorch", "tensorboard", "fnet", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
gchhablani
null
gchhablani/fnet-large-finetuned-cola-copy2
5
null
transformers
16,546
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: fnet-large-finetuned-cola-copy2 results: - task: name: Text Classification type: text-classification dataset: name: GLUE COLA type: glue args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fnet-large-finetuned-cola-copy2 This model is a fine-tuned version of [google/fnet-large](https://huggingface.co/google/fnet-large) on the GLUE COLA dataset. It achieves the following results on the evaluation set: - Loss: 0.6173 - Matthews Correlation: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.6192 | 1.0 | 2138 | 0.6443 | 0.0 | | 0.6177 | 2.0 | 4276 | 0.6296 | 0.0 | | 0.6128 | 3.0 | 6414 | 0.6173 | 0.0 | ### Framework versions - Transformers 4.11.0.dev0 - Pytorch 1.9.0 - Datasets 1.12.1 - Tokenizers 0.10.3
gchhablani/fnet-large-finetuned-cola-copy4
cdee9e3a7233456950febba1dfe27f8a5ff0db27
2021-10-10T19:30:36.000Z
[ "pytorch", "tensorboard", "fnet", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
gchhablani
null
gchhablani/fnet-large-finetuned-cola-copy4
5
null
transformers
16,547
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: fnet-large-finetuned-cola-copy4 results: - task: name: Text Classification type: text-classification dataset: name: GLUE COLA type: glue args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fnet-large-finetuned-cola-copy4 This model is a fine-tuned version of [google/fnet-large](https://huggingface.co/google/fnet-large) on the GLUE COLA dataset. It achieves the following results on the evaluation set: - Loss: 0.6500 - Matthews Correlation: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: polynomial - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.6345 | 1.0 | 2138 | 0.6611 | 0.0 | | 0.6359 | 2.0 | 4276 | 0.6840 | 0.0 | | 0.6331 | 3.0 | 6414 | 0.6500 | 0.0 | ### Framework versions - Transformers 4.11.0.dev0 - Pytorch 1.9.0 - Datasets 1.12.1 - Tokenizers 0.10.3
gchhablani/fnet-large-finetuned-mrpc
bc05e37c8f1e0d5235d9522f854d9be9827bc133
2021-09-22T09:06:01.000Z
[ "pytorch", "tensorboard", "fnet", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
gchhablani
null
gchhablani/fnet-large-finetuned-mrpc
5
null
transformers
16,548
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy - f1 model-index: - name: fnet-large-finetuned-mrpc results: - task: name: Text Classification type: text-classification dataset: name: GLUE MRPC type: glue args: mrpc metrics: - name: Accuracy type: accuracy value: 0.8259803921568627 - name: F1 type: f1 value: 0.8798646362098139 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fnet-large-finetuned-mrpc This model is a fine-tuned version of [google/fnet-large](https://huggingface.co/google/fnet-large) on the GLUE MRPC dataset. It achieves the following results on the evaluation set: - Loss: 1.0872 - Accuracy: 0.8260 - F1: 0.8799 - Combined Score: 0.8529 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------------:| | 0.5656 | 1.0 | 917 | 0.6999 | 0.7843 | 0.8581 | 0.8212 | | 0.3874 | 2.0 | 1834 | 0.7280 | 0.8088 | 0.8691 | 0.8390 | | 0.1627 | 3.0 | 2751 | 1.1274 | 0.8162 | 0.8780 | 0.8471 | | 0.0751 | 4.0 | 3668 | 1.0289 | 0.8333 | 0.8870 | 0.8602 | | 0.0339 | 5.0 | 4585 | 1.0872 | 0.8260 | 0.8799 | 0.8529 | ### Framework versions - Transformers 4.11.0.dev0 - Pytorch 1.9.0 - Datasets 1.12.1 - Tokenizers 0.10.3
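A minimal paraphrase-detection sketch (not part of the original card): it assumes a recent version of Transformers with FNet support via the Auto classes, and the sentence pair is illustrative only; in GLUE MRPC, label 1 means the two sentences are paraphrases.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("gchhablani/fnet-large-finetuned-mrpc")
model = AutoModelForSequenceClassification.from_pretrained("gchhablani/fnet-large-finetuned-mrpc")

# encode the sentence pair together, as done for GLUE MRPC
inputs = tokenizer(
    "The company reported record profits this quarter.",
    "Record quarterly profits were reported by the company.",
    return_tensors="pt",
)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # 1 = paraphrase, 0 = not a paraphrase
```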
georeactor/arabic-flip-mt5
3c599f6aa354b1ccc6f721965736931d2591fe05
2022-02-13T03:49:09.000Z
[ "pytorch", "mt5", "text2text-generation", "ar", "transformers", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
georeactor
null
georeactor/arabic-flip-mt5
5
1
transformers
16,549
--- language: ar license: apache-2.0 --- # Arabic-Flip-mT5 An [mT5-small](https://huggingface.co/google/mt5-small) model fine-tuned on the Arabic Parallel Gender Corpus v2.0. It can be used for gender inflection (M->F or F->M) in Arabic. Uses: data augmentation, detecting the gender bias of a model, obfuscating the gender of a user/customer. ## Training Notebook https://colab.research.google.com/drive/1w3XY9yClfDo1T-1mc19W39i2aceK2d1D?usp=sharing ## Caveats - The Parallel Gender Corpus is built from subtitles consisting mostly of first- and second-person sentences, so the model is less likely to handle third-person references or longer texts well. - Used mT5-small and a batch size of 4 due to Colab GPU constraints; both should be increased if resources allow. - Trained for only 1 epoch due to an issue with Colab.
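## Usage

A minimal usage sketch (assuming the standard mT5 seq2seq interface; the exact prompt format expected by this fine-tuned model is not documented above, and the example sentence is illustrative only):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("georeactor/arabic-flip-mt5")
model = AutoModelForSeq2SeqLM.from_pretrained("georeactor/arabic-flip-mt5")

text = "أنا سعيد جدا"  # illustrative masculine first-person sentence
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_length=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # expected: the gender-flipped inflection
```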
giacomomiolo/electramed_base_scivocab_970k
da58fcc1fb4dd7a261afd616663db722a06801d0
2020-10-02T14:55:30.000Z
[ "pytorch", "tf", "electra", "pretraining", "transformers" ]
null
false
giacomomiolo
null
giacomomiolo/electramed_base_scivocab_970k
5
null
transformers
16,550
Entry not found
giganticode/bert-base-StackOverflow-comments_2M
7c339826e2dfb9fc79c528c262b99ae51ef87797
2021-10-25T13:01:17.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
giganticode
null
giganticode/bert-base-StackOverflow-comments_2M
5
null
transformers
16,551
Entry not found
glasses/efficientnet_b3
bc3320c99442951b700a7a211e732fd20ad645c1
2021-12-01T08:08:37.000Z
[ "pytorch", "arxiv:1905.11946", "transformers" ]
null
false
glasses
null
glasses/efficientnet_b3
5
null
transformers
16,552
# efficientnet_b3 Implementation of EfficientNet proposed in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) ![image](https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNet.png?raw=true) The basic architecture is similar to MobileNetV2, as it was obtained using [Progressive Neural Architecture Search](https://arxiv.org/abs/1905.11946). The following table shows the basic architecture (EfficientNet-efficientnet\_b0): ![image](https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNetModelsTable.jpeg?raw=true) Then, the architecture is scaled up from `efficientnet_b0` to `efficientnet_b7` using compound scaling. ![image](https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNetScaling.jpg?raw=true) ``` python EfficientNet.efficientnet_b0() EfficientNet.efficientnet_b1() EfficientNet.efficientnet_b2() EfficientNet.efficientnet_b3() EfficientNet.efficientnet_b4() EfficientNet.efficientnet_b5() EfficientNet.efficientnet_b6() EfficientNet.efficientnet_b7() EfficientNet.efficientnet_b8() EfficientNet.efficientnet_l2() ``` Examples: ``` python EfficientNet.efficientnet_b0(activation = nn.SELU) # change the number of classes (default is 1000) EfficientNet.efficientnet_b0(n_classes=100) # pass a different block EfficientNet.efficientnet_b0(block=...) # store each feature x = torch.rand((1, 3, 224, 224)) model = EfficientNet.efficientnet_b0() # first call .features; this activates the forward hooks and tells the model you'd like to get the features model.encoder.features model(torch.randn((1,3,224,224))) # get the features from the encoder features = model.encoder.features print([x.shape for x in features]) # [torch.Size([1, 32, 112, 112]), torch.Size([1, 24, 56, 56]), torch.Size([1, 40, 28, 28]), torch.Size([1, 80, 14, 14])] ```
glasses/efficientnet_b6
4a035e9ea60d520f14e70fa9b2169fd4cf96d896
2021-04-22T18:00:19.000Z
[ "pytorch", "arxiv:1905.11946", "transformers" ]
null
false
glasses
null
glasses/efficientnet_b6
5
1
transformers
16,553
# efficientnet_b6 Implementation of EfficientNet proposed in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) ![image](https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNet.png?raw=true) The basic architecture is similar to MobileNetV2, as it was obtained using [Progressive Neural Architecture Search](https://arxiv.org/abs/1905.11946). The following table shows the basic architecture (EfficientNet-efficientnet\_b0): ![image](https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNetModelsTable.jpeg?raw=true) Then, the architecture is scaled up from `efficientnet_b0` to `efficientnet_b7` using compound scaling. ![image](https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/EfficientNetScaling.jpg?raw=true) ``` python EfficientNet.efficientnet_b0() EfficientNet.efficientnet_b1() EfficientNet.efficientnet_b2() EfficientNet.efficientnet_b3() EfficientNet.efficientnet_b4() EfficientNet.efficientnet_b5() EfficientNet.efficientnet_b6() EfficientNet.efficientnet_b7() EfficientNet.efficientnet_b8() EfficientNet.efficientnet_l2() ``` Examples: ``` python EfficientNet.efficientnet_b0(activation = nn.SELU) # change the number of classes (default is 1000) EfficientNet.efficientnet_b0(n_classes=100) # pass a different block EfficientNet.efficientnet_b0(block=...) # store each feature x = torch.rand((1, 3, 224, 224)) model = EfficientNet.efficientnet_b0() # first call .features; this activates the forward hooks and tells the model you'd like to get the features model.encoder.features model(torch.randn((1,3,224,224))) # get the features from the encoder features = model.encoder.features print([x.shape for x in features]) # [torch.Size([1, 32, 112, 112]), torch.Size([1, 24, 56, 56]), torch.Size([1, 40, 28, 28]), torch.Size([1, 80, 14, 14])] ```
gniemiec/t5-small-finetuned-xsum
d95aa2d2baf050760dc52799a34dad5e04eaecd9
2021-09-20T11:36:55.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
gniemiec
null
gniemiec/t5-small-finetuned-xsum
5
null
transformers
16,554
--- license: apache-2.0 tags: - generated_from_trainer datasets: - xsum metrics: - rouge model-index: - name: t5-small-finetuned-xsum results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: xsum type: xsum args: default metrics: - name: Rouge1 type: rouge value: 23.0533 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-xsum This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the xsum dataset. It achieves the following results on the evaluation set: - Loss: 2.7967 - Rouge1: 23.0533 - Rouge2: 3.912 - Rougel: 17.8534 - Rougelsum: 17.8581 - Gen Len: 18.6878 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 3.0574 | 1.0 | 1276 | 2.7967 | 23.0533 | 3.912 | 17.8534 | 17.8581 | 18.6878 | ### Framework versions - Transformers 4.10.2 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
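A minimal inference sketch (not part of the original card): it assumes the repository includes the tokenizer saved during fine-tuning, prepends the usual `summarize:` T5 prefix explicitly, and uses generation settings that were not specified above.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("gniemiec/t5-small-finetuned-xsum")
model = AutoModelForSeq2SeqLM.from_pretrained("gniemiec/t5-small-finetuned-xsum")

article = "..."  # replace with an English news article
inputs = tokenizer("summarize: " + article, return_tensors="pt", max_length=512, truncation=True)
summary_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```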
Language-Media-Lab/byt5-small-ain-jpn-mt
d59ba14e3b66aa1a1c0c289212228a86fbe20efa
2022-02-04T13:03:14.000Z
[ "pytorch", "t5", "text2text-generation", "ain", "ja", "transformers", "translation", "autotrain_compatible" ]
translation
false
Language-Media-Lab
null
Language-Media-Lab/byt5-small-ain-jpn-mt
5
1
transformers
16,555
--- language: - ain - ja tags: - translation --- Byt5-small-ain-jpn-mt is a machine translation model initialized from [Google's ByT5-small](https://huggingface.co/google/byt5-small) and fine-tuned on bilingual datasets crawled from the Web. It translates the Ainu language into Japanese.
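A minimal usage sketch (the Ainu input below is only an illustrative greeting, and the generation settings are assumptions rather than values from the authors):

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("Language-Media-Lab/byt5-small-ain-jpn-mt")
model = T5ForConditionalGeneration.from_pretrained("Language-Media-Lab/byt5-small-ain-jpn-mt")

ainu_text = "irankarapte"  # a common Ainu greeting, used here only as an example
inputs = tokenizer(ainu_text, return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # Japanese translation
```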
google/multiberts-seed_4-step_120k
6fdecff287112578214b3f0d9ad37bf395c39378
2021-11-06T03:12:26.000Z
[ "pytorch", "tf", "bert", "pretraining", "en", "arxiv:2106.16163", "arxiv:1908.08962", "transformers", "multiberts", "multiberts-seed_4", "multiberts-seed_4-step_120k", "license:apache-2.0" ]
null
false
google
null
google/multiberts-seed_4-step_120k
5
null
transformers
16,556
--- language: en tags: - multiberts - multiberts-seed_4 - multiberts-seed_4-step_120k license: apache-2.0 --- # MultiBERTs, Intermediate Checkpoint - Seed 4, Step 120k MultiBERTs is a collection of checkpoints and a statistical library to support robust research on BERT. We provide 25 BERT-base models trained with similar hyper-parameters as [the original BERT model](https://github.com/google-research/bert) but with different random seeds, which causes variations in the initial weights and order of training instances. The aim is to distinguish findings that apply to a specific artifact (i.e., a particular instance of the model) from those that apply to the more general procedure. We also provide 140 intermediate checkpoints captured during the course of pre-training (we saved 28 checkpoints for the first 5 runs). The models were originally released through [http://goo.gle/multiberts](http://goo.gle/multiberts). We describe them in our paper [The MultiBERTs: BERT Reproductions for Robustness Analysis](https://arxiv.org/abs/2106.16163). This is model #4, captured at step 120k (max: 2000k, i.e., 2M steps). ## Model Description This model was captured during a reproduction of [BERT-base uncased](https://github.com/google-research/bert), for English: it is a Transformers model pretrained on a large corpus of English data, using the Masked Language Modelling (MLM) and the Next Sentence Prediction (NSP) objectives. The intended uses, limitations, training data and training procedure for the fully trained model are similar to [BERT-base uncased](https://github.com/google-research/bert). Two major differences with the original model: * We pre-trained the MultiBERTs models for 2 million steps using sequence length 512 (instead of 1 million steps using sequence length 128 then 512). * We used an alternative version of Wikipedia and Books Corpus, initially collected for [Turc et al., 2019](https://arxiv.org/abs/1908.08962). This is a best-effort reproduction, and so it is probable that differences with the original model have gone unnoticed. The performance of MultiBERTs on GLUE after full training is oftentimes comparable to that of original BERT, but we found significant differences on the dev set of SQuAD (MultiBERTs outperforms original BERT). See our [technical report](https://arxiv.org/abs/2106.16163) for more details. ### How to use Using code from [BERT-base uncased](https://huggingface.co/bert-base-uncased), here is an example based on Tensorflow: ``` from transformers import BertTokenizer, TFBertModel tokenizer = BertTokenizer.from_pretrained('google/multiberts-seed_4-step_120k') model = TFBertModel.from_pretrained("google/multiberts-seed_4-step_120k") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` PyTorch version: ``` from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained('google/multiberts-seed_4-step_120k') model = BertModel.from_pretrained("google/multiberts-seed_4-step_120k") text = "Replace me by any text you'd like." 
encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` ## Citation info ```bibtex @article{sellam2021multiberts, title={The MultiBERTs: BERT Reproductions for Robustness Analysis}, author={Thibault Sellam and Steve Yadlowsky and Jason Wei and Naomi Saphra and Alexander D'Amour and Tal Linzen and Jasmijn Bastings and Iulia Turc and Jacob Eisenstein and Dipanjan Das and Ian Tenney and Ellie Pavlick}, journal={arXiv preprint arXiv:2106.16163}, year={2021} } ```
google/multiberts-seed_4-step_1700k
450fb06c819d6e53d826386ad939febe49ef7d29
2021-11-06T03:45:49.000Z
[ "pytorch", "tf", "bert", "pretraining", "en", "arxiv:2106.16163", "arxiv:1908.08962", "transformers", "multiberts", "multiberts-seed_4", "multiberts-seed_4-step_1700k", "license:apache-2.0" ]
null
false
google
null
google/multiberts-seed_4-step_1700k
5
null
transformers
16,557
--- language: en tags: - multiberts - multiberts-seed_4 - multiberts-seed_4-step_1700k license: apache-2.0 --- # MultiBERTs, Intermediate Checkpoint - Seed 4, Step 1700k MultiBERTs is a collection of checkpoints and a statistical library to support robust research on BERT. We provide 25 BERT-base models trained with similar hyper-parameters as [the original BERT model](https://github.com/google-research/bert) but with different random seeds, which causes variations in the initial weights and order of training instances. The aim is to distinguish findings that apply to a specific artifact (i.e., a particular instance of the model) from those that apply to the more general procedure. We also provide 140 intermediate checkpoints captured during the course of pre-training (we saved 28 checkpoints for the first 5 runs). The models were originally released through [http://goo.gle/multiberts](http://goo.gle/multiberts). We describe them in our paper [The MultiBERTs: BERT Reproductions for Robustness Analysis](https://arxiv.org/abs/2106.16163). This is model #4, captured at step 1700k (max: 2000k, i.e., 2M steps). ## Model Description This model was captured during a reproduction of [BERT-base uncased](https://github.com/google-research/bert), for English: it is a Transformers model pretrained on a large corpus of English data, using the Masked Language Modelling (MLM) and the Next Sentence Prediction (NSP) objectives. The intended uses, limitations, training data and training procedure for the fully trained model are similar to [BERT-base uncased](https://github.com/google-research/bert). Two major differences with the original model: * We pre-trained the MultiBERTs models for 2 million steps using sequence length 512 (instead of 1 million steps using sequence length 128 then 512). * We used an alternative version of Wikipedia and Books Corpus, initially collected for [Turc et al., 2019](https://arxiv.org/abs/1908.08962). This is a best-effort reproduction, and so it is probable that differences with the original model have gone unnoticed. The performance of MultiBERTs on GLUE after full training is oftentimes comparable to that of original BERT, but we found significant differences on the dev set of SQuAD (MultiBERTs outperforms original BERT). See our [technical report](https://arxiv.org/abs/2106.16163) for more details. ### How to use Using code from [BERT-base uncased](https://huggingface.co/bert-base-uncased), here is an example based on Tensorflow: ``` from transformers import BertTokenizer, TFBertModel tokenizer = BertTokenizer.from_pretrained('google/multiberts-seed_4-step_1700k') model = TFBertModel.from_pretrained("google/multiberts-seed_4-step_1700k") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` PyTorch version: ``` from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained('google/multiberts-seed_4-step_1700k') model = BertModel.from_pretrained("google/multiberts-seed_4-step_1700k") text = "Replace me by any text you'd like." 
encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` ## Citation info ```bibtex @article{sellam2021multiberts, title={The MultiBERTs: BERT Reproductions for Robustness Analysis}, author={Thibault Sellam and Steve Yadlowsky and Jason Wei and Naomi Saphra and Alexander D'Amour and Tal Linzen and Jasmijn Bastings and Iulia Turc and Jacob Eisenstein and Dipanjan Das and Ian Tenney and Ellie Pavlick}, journal={arXiv preprint arXiv:2106.16163}, year={2021} } ```
google/multiberts-seed_4-step_300k
0c62193e614ff5e171c9b48e6ebe4b7b5113376a
2021-11-06T03:21:36.000Z
[ "pytorch", "tf", "bert", "pretraining", "en", "arxiv:2106.16163", "arxiv:1908.08962", "transformers", "multiberts", "multiberts-seed_4", "multiberts-seed_4-step_300k", "license:apache-2.0" ]
null
false
google
null
google/multiberts-seed_4-step_300k
5
null
transformers
16,558
--- language: en tags: - multiberts - multiberts-seed_4 - multiberts-seed_4-step_300k license: apache-2.0 --- # MultiBERTs, Intermediate Checkpoint - Seed 4, Step 300k MultiBERTs is a collection of checkpoints and a statistical library to support robust research on BERT. We provide 25 BERT-base models trained with similar hyper-parameters as [the original BERT model](https://github.com/google-research/bert) but with different random seeds, which causes variations in the initial weights and order of training instances. The aim is to distinguish findings that apply to a specific artifact (i.e., a particular instance of the model) from those that apply to the more general procedure. We also provide 140 intermediate checkpoints captured during the course of pre-training (we saved 28 checkpoints for the first 5 runs). The models were originally released through [http://goo.gle/multiberts](http://goo.gle/multiberts). We describe them in our paper [The MultiBERTs: BERT Reproductions for Robustness Analysis](https://arxiv.org/abs/2106.16163). This is model #4, captured at step 300k (max: 2000k, i.e., 2M steps). ## Model Description This model was captured during a reproduction of [BERT-base uncased](https://github.com/google-research/bert), for English: it is a Transformers model pretrained on a large corpus of English data, using the Masked Language Modelling (MLM) and the Next Sentence Prediction (NSP) objectives. The intended uses, limitations, training data and training procedure for the fully trained model are similar to [BERT-base uncased](https://github.com/google-research/bert). Two major differences with the original model: * We pre-trained the MultiBERTs models for 2 million steps using sequence length 512 (instead of 1 million steps using sequence length 128 then 512). * We used an alternative version of Wikipedia and Books Corpus, initially collected for [Turc et al., 2019](https://arxiv.org/abs/1908.08962). This is a best-effort reproduction, and so it is probable that differences with the original model have gone unnoticed. The performance of MultiBERTs on GLUE after full training is oftentimes comparable to that of original BERT, but we found significant differences on the dev set of SQuAD (MultiBERTs outperforms original BERT). See our [technical report](https://arxiv.org/abs/2106.16163) for more details. ### How to use Using code from [BERT-base uncased](https://huggingface.co/bert-base-uncased), here is an example based on Tensorflow: ``` from transformers import BertTokenizer, TFBertModel tokenizer = BertTokenizer.from_pretrained('google/multiberts-seed_4-step_300k') model = TFBertModel.from_pretrained("google/multiberts-seed_4-step_300k") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` PyTorch version: ``` from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained('google/multiberts-seed_4-step_300k') model = BertModel.from_pretrained("google/multiberts-seed_4-step_300k") text = "Replace me by any text you'd like." 
encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` ## Citation info ```bibtex @article{sellam2021multiberts, title={The MultiBERTs: BERT Reproductions for Robustness Analysis}, author={Thibault Sellam and Steve Yadlowsky and Jason Wei and Naomi Saphra and Alexander D'Amour and Tal Linzen and Jasmijn Bastings and Iulia Turc and Jacob Eisenstein and Dipanjan Das and Ian Tenney and Ellie Pavlick}, journal={arXiv preprint arXiv:2106.16163}, year={2021} } ```
google/t5-efficient-base-dm1000
ed74bee9cb525ee951759e47818bd21690d7fc39
2022-02-15T10:52:15.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-dm1000
5
null
transformers
16,559
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-DM1000 (Deep-Narrow version) T5-Efficient-BASE-DM1000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-dm1000** - is of model type **Base** with the following variations: - **dm** is **1000** It has **297.23** million parameters and thus requires *ca.* **1188.93 MB** of memory in full precision (*fp32*) or **594.47 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *Tensorflow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint. 
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
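A minimal loading sketch (the checkpoint still has to be fine-tuned before it is useful, as noted above; it assumes the repository ships the standard T5 SentencePiece tokenizer):

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/t5-efficient-base-dm1000")
model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-dm1000")

# pretrained-only checkpoint: fine-tune it on a downstream task before using it for inference
print(f"{model.num_parameters() / 1e6:.2f}M parameters")  # roughly 297M, matching the figure above
```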
google/t5-efficient-base-el16
01129ccde7c25c2d07b347ed4ccca16779a3d7cc
2022-02-15T10:52:28.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-el16
5
null
transformers
16,560
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-EL16 (Deep-Narrow version) T5-Efficient-BASE-EL16 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-el16** - is of model type **Base** with the following variations: - **el** is **16** It has **251.25** million parameters and thus requires *ca.* **1005.01 MB** of memory in full precision (*fp32*) or **502.51 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *Tensorflow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint. 
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-el4
22a92789407de515e7edc287d9763ebedac2cef2
2022-02-15T10:49:47.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-el4
5
null
transformers
16,561
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-EL4 (Deep-Narrow version) T5-Efficient-BASE-EL4 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-el4** - is of model type **Base** with the following variations: - **el** is **4** It has **166.29** million parameters and thus requires *ca.* **665.16 MB** of memory in full precision (*fp32*) or **332.58 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *Tensorflow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint. 
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-el8
f7c73f11984ca97b352c6ae58c3ef6336b30fc91
2022-02-15T10:52:37.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-el8
5
null
transformers
16,562
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-EL8 (Deep-Narrow version) T5-Efficient-BASE-EL8 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-el8** - is of model type **Base** with the following variations: - **el** is **8** It has **194.61** million parameters and thus requires *ca.* **778.44 MB** of memory in full precision (*fp32*) or **389.22 MB** of memory in half precision (*fp16* or *bf16*). 
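For illustration, here is a minimal sketch (not part of the original model card) of loading this pretrained-only checkpoint and running a single toy fine-tuning step; it assumes the standard Hugging Face `transformers` seq2seq API with a PyTorch backend, and the input/target strings are placeholders.

```python
# Illustrative only: a single toy fine-tuning step for google/t5-efficient-base-el8.
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/t5-efficient-base-el8")
model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-el8")

# The checkpoint is pretrained-only, so it has to be adapted to a downstream task first.
inputs = tokenizer("summarize: placeholder input document", return_tensors="pt")
labels = tokenizer("placeholder summary", return_tensors="pt").input_ids
loss = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=labels).loss
loss.backward()  # gradients for one optimizer step
```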
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-ff12000
0e14ead8554de3b21a360ad144e8bffa35e6439f
2022-02-15T10:52:43.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-ff12000
5
null
transformers
16,563
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-FF12000 (Deep-Narrow version) T5-Efficient-BASE-FF12000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-ff12000** - is of model type **Base** with the following variations: - **ff** is **12000** It has **562.67** million parameters and thus requires *ca.* **2250.68 MB** of memory in full precision (*fp32*) or **1125.34 MB** of memory in half precision (*fp16* or *bf16*). 
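For illustration, here is a minimal loading sketch (not part of the original model card) showing how the fp32 and fp16 memory figures above translate into code; it assumes the Hugging Face `transformers` library with a PyTorch backend.

```python
# Illustrative only: load google/t5-efficient-base-ff12000 in full or half precision.
import torch
from transformers import T5ForConditionalGeneration

model_fp32 = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-ff12000")
print(f"{model_fp32.num_parameters() / 1e6:.2f}M parameters")  # should roughly match the count quoted above

# Half-precision load, roughly halving the memory footprint.
model_fp16 = T5ForConditionalGeneration.from_pretrained(
    "google/t5-efficient-base-ff12000", torch_dtype=torch.float16
)
```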
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-ff6000
7f4b6a0ce97457da5768a5c6d5d07bc1a57b14c6
2022-02-15T10:52:49.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-ff6000
5
null
transformers
16,564
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-FF6000 (Deep-Narrow version) T5-Efficient-BASE-FF6000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-ff6000** - is of model type **Base** with the following variations: - **ff** is **6000** It has **336.18** million parameters and thus requires *ca.* **1344.71 MB** of memory in full precision (*fp32*) or **672.36 MB** of memory in half precision (*fp16* or *bf16*). 
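For illustration, here is a minimal sketch (not part of the original model card) of loading this pretrained-only checkpoint and running a single toy fine-tuning step; it assumes the standard Hugging Face `transformers` seq2seq API with a PyTorch backend, and the input/target strings are placeholders.

```python
# Illustrative only: a single toy fine-tuning step for google/t5-efficient-base-ff6000.
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/t5-efficient-base-ff6000")
model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-ff6000")

# The checkpoint is pretrained-only, so it has to be adapted to a downstream task first.
inputs = tokenizer("summarize: placeholder input document", return_tensors="pt")
labels = tokenizer("placeholder summary", return_tensors="pt").input_ids
loss = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=labels).loss
loss.backward()  # gradients for one optimizer step
```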
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-ff9000
bef2484838e3bb0bf8fcfaf6c3f5d7d8b3412e04
2022-02-15T10:52:53.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-ff9000
5
null
transformers
16,565
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-FF9000 (Deep-Narrow version) T5-Efficient-BASE-FF9000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-ff9000** - is of model type **Base** with the following variations: - **ff** is **9000** It has **449.42** million parameters and thus requires *ca.* **1797.7 MB** of memory in full precision (*fp32*) or **898.85 MB** of memory in half precision (*fp16* or *bf16*). 
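For illustration, here is a minimal loading sketch (not part of the original model card) showing how the fp32 and fp16 memory figures above translate into code; it assumes the Hugging Face `transformers` library with a PyTorch backend.

```python
# Illustrative only: load google/t5-efficient-base-ff9000 in full or half precision.
import torch
from transformers import T5ForConditionalGeneration

model_fp32 = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-ff9000")
print(f"{model_fp32.num_parameters() / 1e6:.2f}M parameters")  # should roughly match the count quoted above

# Half-precision load, roughly halving the memory footprint.
model_fp16 = T5ForConditionalGeneration.from_pretrained(
    "google/t5-efficient-base-ff9000", torch_dtype=torch.float16
)
```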
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-kv128
01c4ad3d9ef155eaa7a51011e4045eb586165ea8
2022-02-15T10:52:56.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-kv128
5
null
transformers
16,566
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-KV128 (Deep-Narrow version) T5-Efficient-BASE-KV128 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-kv128** - is of model type **Base** with the following variations: - **kv** is **128** It has **307.87** million parameters and thus requires *ca.* **1231.47 MB** of memory in full precision (*fp32*) or **615.73 MB** of memory in half precision (*fp16* or *bf16*). 
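For illustration, here is a minimal sketch (not part of the original model card) of loading this pretrained-only checkpoint and running a single toy fine-tuning step; it assumes the standard Hugging Face `transformers` seq2seq API with a PyTorch backend, and the input/target strings are placeholders.

```python
# Illustrative only: a single toy fine-tuning step for google/t5-efficient-base-kv128.
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/t5-efficient-base-kv128")
model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-kv128")

# The checkpoint is pretrained-only, so it has to be adapted to a downstream task first.
inputs = tokenizer("summarize: placeholder input document", return_tensors="pt")
labels = tokenizer("placeholder summary", return_tensors="pt").input_ids
loss = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=labels).loss
loss.backward()  # gradients for one optimizer step
```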
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-nh8
b1bfc702f8fa666aa8c67da43bb4b9b7fee4a49d
2022-02-15T10:53:18.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-nh8
5
null
transformers
16,567
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-NH8 (Deep-Narrow version) T5-Efficient-BASE-NH8 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-nh8** - is of model type **Base** with the following variations: - **nh** is **8** It has **194.62** million parameters and thus requires *ca.* **778.48 MB** of memory in full precision (*fp32*) or **389.24 MB** of memory in half precision (*fp16* or *bf16*). 
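For illustration, here is a minimal loading sketch (not part of the original model card) showing how the fp32 and fp16 memory figures above translate into code; it assumes the Hugging Face `transformers` library with a PyTorch backend.

```python
# Illustrative only: load google/t5-efficient-base-nh8 in full or half precision.
import torch
from transformers import T5ForConditionalGeneration

model_fp32 = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-nh8")
print(f"{model_fp32.num_parameters() / 1e6:.2f}M parameters")  # should roughly match the count quoted above

# Half-precision load, roughly halving the memory footprint.
model_fp16 = T5ForConditionalGeneration.from_pretrained(
    "google/t5-efficient-base-nh8", torch_dtype=torch.float16
)
```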
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-nl4
971c3fa411256b8dc8dd1bf604a14f058a13d47a
2022-02-15T10:57:25.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-nl4
5
null
transformers
16,568
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-NL4 (Deep-Narrow version) T5-Efficient-BASE-NL4 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-nl4** - is of model type **Base** with the following variations: - **nl** is **4** It has **90.76** million parameters and thus requires *ca.* **363.05 MB** of memory in full precision (*fp32*) or **181.52 MB** of memory in half precision (*fp16* or *bf16*). 
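For illustration, here is a minimal sketch (not part of the original model card) of loading this pretrained-only checkpoint and running a single toy fine-tuning step; it assumes the standard Hugging Face `transformers` seq2seq API with a PyTorch backend, and the input/target strings are placeholders.

```python
# Illustrative only: a single toy fine-tuning step for google/t5-efficient-base-nl4.
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/t5-efficient-base-nl4")
model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-nl4")

# The checkpoint is pretrained-only, so it has to be adapted to a downstream task first.
inputs = tokenizer("summarize: placeholder input document", return_tensors="pt")
labels = tokenizer("placeholder summary", return_tensors="pt").input_ids
loss = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=labels).loss
loss.backward()  # gradients for one optimizer step
```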
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-base-nl48
9bdd6246cd337961ff504ab7a6f5372bab984d48
2022-02-15T10:49:50.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-base-nl48
5
null
transformers
16,569
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-BASE-NL48 (Deep-Narrow version) T5-Efficient-BASE-NL48 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-base-nl48** - is of model type **Base** with the following variations: - **nl** is **48** It has **817.7** million parameters and thus requires *ca.* **3270.79 MB** of memory in full precision (*fp32*) or **1635.39 MB** of memory in half precision (*fp16* or *bf16*). 
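For illustration, here is a minimal loading sketch (not part of the original model card) showing how the fp32 and fp16 memory figures above translate into code; it assumes the Hugging Face `transformers` library with a PyTorch backend.

```python
# Illustrative only: load google/t5-efficient-base-nl48 in full or half precision.
import torch
from transformers import T5ForConditionalGeneration

model_fp32 = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-base-nl48")
print(f"{model_fp32.num_parameters() / 1e6:.2f}M parameters")  # should roughly match the count quoted above

# Half-precision load, roughly halving the memory footprint.
model_fp16 = T5ForConditionalGeneration.from_pretrained(
    "google/t5-efficient-base-nl48", torch_dtype=torch.float16
)
```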
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-large-dm128
cb1748cfc350cf0d3a80589365e0d21ac42f11ab
2022-02-15T10:53:40.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-large-dm128
5
null
transformers
16,570
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-LARGE-DM128 (Deep-Narrow version) T5-Efficient-LARGE-DM128 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-large-dm128** - is of model type **Large** with the following variations: - **dm** is **128** It has **92.27** million parameters and thus requires *ca.* **369.06 MB** of memory in full precision (*fp32*) or **184.53 MB** of memory in half precision (*fp16* or *bf16*). 
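For illustration, here is a minimal sketch (not part of the original model card) of loading this pretrained-only checkpoint and running a single toy fine-tuning step; it assumes the standard Hugging Face `transformers` seq2seq API with a PyTorch backend, and the input/target strings are placeholders.

```python
# Illustrative only: a single toy fine-tuning step for google/t5-efficient-large-dm128.
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/t5-efficient-large-dm128")
model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-large-dm128")

# The checkpoint is pretrained-only, so it has to be adapted to a downstream task first.
inputs = tokenizer("summarize: placeholder input document", return_tensors="pt")
labels = tokenizer("placeholder summary", return_tensors="pt").input_ids
loss = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=labels).loss
loss.backward()  # gradients for one optimizer step
```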
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params|
| ----| ---- | ---- | ---- | ---- | ---- | ----|
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M|
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M|
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M|
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M|
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M|
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B|
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B|

where the following abbreviations are used:

| Abbreviation | Definition |
| ----| ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformers block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-values projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*Tensorflow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
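To make the fine-tuning note above concrete, here is a minimal sketch of how this checkpoint could be loaded and trained for a single step with the `transformers` library. The task prefix, toy sentences, and optimizer settings are illustrative assumptions, not part of the linked official examples.

```python
# Minimal, illustrative fine-tuning step (assumes torch, transformers and sentencepiece
# are installed); the toy texts and hyperparameters below are assumptions, not a recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-large-dm128"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: Scaling depth improves downstream quality per parameter.",
                   return_tensors="pt")
labels = tokenizer("Deeper models transfer better.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```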
google/t5-efficient-large-dm768
cb60bfad8f28d439aded205a602c6dc3fb781465
2022-02-15T10:53:46.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-large-dm768
5
null
transformers
16,571
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-LARGE-DM768 (Deep-Narrow version) T5-Efficient-LARGE-DM768 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-large-dm768** - is of model type **Large** with the following variations: - **dm** is **768** It has **553.31** million parameters and thus requires *ca.* **2213.23 MB** of memory in full precision (*fp32*) or **1106.62 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
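As a quick illustration of the fine-tuning note above, the sketch below loads the checkpoint and runs one supervised training step; the example texts, task prefix, and learning rate are assumptions made purely for demonstration.

```python
# Illustrative fine-tuning step (assumes torch, transformers and sentencepiece are
# installed); toy data and hyperparameters are assumptions, not an official recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-large-dm768"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: The study compares uniform scaling with depth-first scaling.",
                   return_tensors="pt")
labels = tokenizer("Depth-first scaling wins.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```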
google/t5-efficient-large-nh12
f162a164b345a4e1cc40e46f0ef910fc1069b527
2022-02-15T10:55:25.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-large-nh12
5
null
transformers
16,572
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-LARGE-NH12 (Deep-Narrow version) T5-Efficient-LARGE-NH12 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-large-nh12** - is of model type **Large** with the following variations: - **nh** is **12** It has **662.23** million parameters and thus requires *ca.* **2648.91 MB** of memory in full precision (*fp32*) or **1324.45 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
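For readers who want a starting point beyond the linked scripts, the hedged sketch below shows one way this checkpoint might be wired up for fine-tuning; the toy data and hyperparameters are assumptions, not recommendations from the paper or this card.

```python
# Illustrative fine-tuning step (assumes torch, transformers and sentencepiece are
# installed); toy data and hyperparameters are assumptions, not an official recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-large-nh12"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: Narrow but deep transformers can match wider baselines.",
                   return_tensors="pt")
labels = tokenizer("Deep narrow models are efficient.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```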
google/t5-efficient-large-nl2
5bdfcdec9e53a071ea94f4ac9851b883d16ebca8
2022-02-15T10:55:51.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-large-nl2
5
null
transformers
16,573
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-LARGE-NL2 (Deep-Narrow version) T5-Efficient-LARGE-NL2 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-large-nl2** - is of model type **Large** with the following variations: - **nl** is **2** It has **91.64** million parameters and thus requires *ca.* **366.55 MB** of memory in full precision (*fp32*) or **183.28 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
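To make the fine-tuning note above concrete, here is a minimal sketch of how this checkpoint could be loaded and trained for a single step with the `transformers` library. The task prefix, toy sentences, and optimizer settings are illustrative assumptions, not part of the linked official examples.

```python
# Minimal, illustrative fine-tuning step (assumes torch, transformers and sentencepiece
# are installed); the toy texts and hyperparameters below are assumptions, not a recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-large-nl2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: Shallow variants trade quality for much faster inference.",
                   return_tensors="pt")
labels = tokenizer("Shallower models run faster.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```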
google/t5-efficient-large-nl20
e7cc889984911658550b777d685d6c61f8969056
2022-02-15T10:55:54.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-large-nl20
5
null
transformers
16,574
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-LARGE-NL20 (Deep-Narrow version) T5-Efficient-LARGE-NL20 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-large-nl20** - is of model type **Large** with the following variations: - **nl** is **20** It has **620.25** million parameters and thus requires *ca.* **2481.02 MB** of memory in full precision (*fp32*) or **1240.51 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
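As a quick illustration of the fine-tuning note above, the sketch below loads the checkpoint and runs one supervised training step; the example texts, task prefix, and learning rate are assumptions made purely for demonstration.

```python
# Illustrative fine-tuning step (assumes torch, transformers and sentencepiece are
# installed); toy data and hyperparameters are assumptions, not an official recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-large-nl20"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: Adding layers helps until the gains flatten out.",
                   return_tensors="pt")
labels = tokenizer("Gains diminish with extreme depth.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```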
google/t5-efficient-mini-nl12
28971abac69bbff36a31d20354193895cb32c73b
2022-02-15T10:56:07.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-mini-nl12
5
null
transformers
16,575
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-MINI-NL12 (Deep-Narrow version) T5-Efficient-MINI-NL12 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-mini-nl12** - is of model type **Mini** with the following variations: - **nl** is **12** It has **69.01** million parameters and thus requires *ca.* **276.05 MB** of memory in full precision (*fp32*) or **138.03 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
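For readers who want a starting point beyond the linked scripts, the hedged sketch below shows one way this checkpoint might be wired up for fine-tuning; the toy data and hyperparameters are assumptions, not recommendations from the paper or this card.

```python
# Illustrative fine-tuning step (assumes torch, transformers and sentencepiece are
# installed); toy data and hyperparameters are assumptions, not an official recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-mini-nl12"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: Small deep models can beat larger shallow ones.",
                   return_tensors="pt")
labels = tokenizer("Depth helps small models.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```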
google/t5-efficient-small-ff1000
c16004fc9d6029e89eff639a9f98dc342cb8bd96
2022-02-15T10:50:15.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-small-ff1000
5
null
transformers
16,576
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-SMALL-FF1000 (Deep-Narrow version) T5-Efficient-SMALL-FF1000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-small-ff1000** - is of model type **Small** with the following variations: - **ff** is **1000** It has **47.94** million parameters and thus requires *ca.* **191.75 MB** of memory in full precision (*fp32*) or **95.88 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
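To make the fine-tuning note above concrete, here is a minimal sketch of how this checkpoint could be loaded and trained for a single step with the `transformers` library. The task prefix, toy sentences, and optimizer settings are illustrative assumptions, not part of the linked official examples.

```python
# Minimal, illustrative fine-tuning step (assumes torch, transformers and sentencepiece
# are installed); the toy texts and hyperparameters below are assumptions, not a recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-small-ff1000"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: Shrinking the feed-forward width reduces parameters.",
                   return_tensors="pt")
labels = tokenizer("Narrow feed-forward layers save memory.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```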
google/t5-efficient-small-ff3000
594bf2eea54fcb2a662494bc8dd3073ee6a2bf83
2022-02-15T10:50:21.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-small-ff3000
5
null
transformers
16,577
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-SMALL-FF3000 (Deep-Narrow version) T5-Efficient-SMALL-FF3000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-small-ff3000** - is of model type **Small** with the following variations: - **ff** is **3000** It has **73.1** million parameters and thus requires *ca.* **292.42 MB** of memory in full precision (*fp32*) or **146.21 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
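As a quick illustration of the fine-tuning note above, the sketch below loads the checkpoint and runs one supervised training step; the example texts, task prefix, and learning rate are assumptions made purely for demonstration.

```python
# Illustrative fine-tuning step (assumes torch, transformers and sentencepiece are
# installed); toy data and hyperparameters are assumptions, not an official recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-small-ff3000"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: A wider feed-forward block adds capacity to the small model.",
                   return_tensors="pt")
labels = tokenizer("Wider feed-forward layers add capacity.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```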
google/t5-efficient-small-ff6000
212322b5d903ae54acc02a7d2f64b8668db2471e
2022-02-15T10:50:24.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-small-ff6000
5
null
transformers
16,578
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-SMALL-FF6000 (Deep-Narrow version) T5-Efficient-SMALL-FF6000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-small-ff6000** - is of model type **Small** with the following variations: - **ff** is **6000** It has **110.85** million parameters and thus requires *ca.* **443.41 MB** of memory in full precision (*fp32*) or **221.71 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers, as they are probably of limited practical use and lack a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported in the future.
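For readers who want a starting point beyond the linked scripts, the hedged sketch below shows one way this checkpoint might be wired up for fine-tuning; the toy data and hyperparameters are assumptions, not recommendations from the paper or this card.

```python
# Illustrative fine-tuning step (assumes torch, transformers and sentencepiece are
# installed); toy data and hyperparameters are assumptions, not an official recipe.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "google/t5-efficient-small-ff6000"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

# One toy (input, target) pair; a real run would iterate over a task dataset.
inputs = tokenizer("summarize: Very wide feed-forward blocks dominate the parameter count.",
                   return_tensors="pt")
labels = tokenizer("Feed-forward width drives model size.", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)  # passing labels makes the model return a loss
outputs.loss.backward()

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
optimizer.step()
```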
google/t5-efficient-small-kv128
4b36f8cb76545085ef03809c1b7cb920a0bff8d3
2022-02-15T10:50:30.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-small-kv128
5
null
transformers
16,579
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-SMALL-KV128 (Deep-Narrow version) T5-Efficient-SMALL-KV128 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-small-kv128** - is of model type **Small** with the following variations: - **kv** is **128** It has **79.4** million parameters and thus requires *ca.* **317.58 MB** of memory in full precision (*fp32*) or **158.79 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples on how to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *TensorFlow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
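For orientation, a minimal sketch of loading this checkpoint and computing a single seq2seq training loss with 🤗 Transformers is shown below; the input/target pair is a placeholder, and a real fine-tuning run should follow one of the example scripts linked above:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "google/t5-efficient-small-kv128"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Placeholder (input, target) pair; replace with your task's data.
inputs = tokenizer("summarize: studies have shown that owning a dog is good for you",
                   return_tensors="pt")
labels = tokenizer("owning a dog is good for you", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)
outputs.loss.backward()  # hook this into your training loop or the Trainer API
```

The same pattern applies to the other T5-Efficient checkpoints; only the repository id changes.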
google/t5-efficient-tiny-el2
48f3b863b85d1b76693734085e5223c38798f899
2022-02-15T10:54:27.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-tiny-el2
5
null
transformers
16,580
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-TINY-EL2 (Deep-Narrow version) T5-Efficient-TINY-EL2 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-tiny-el2** - is of model type **Tiny** with the following variations: - **el** is **2** It has **22.41** million parameters and thus requires *ca.* **89.64 MB** of memory in full precision (*fp32*) or **44.82 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- |
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M |
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M |
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M |
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M |
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M |
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B |
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B |

where the following abbreviations are used:

| Abbreviation | Definition |
| ---- | ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformer block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-value projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*TensorFlow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-tiny-el8
7ea9e69a0a57c0f9bec6ed97737bf3b960cdd66d
2022-02-15T10:51:15.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-tiny-el8
5
null
transformers
16,581
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-TINY-EL8 (Deep-Narrow version) T5-Efficient-TINY-EL8 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-tiny-el8** - is of model type **Tiny** with the following variations: - **el** is **8** It has **27.14** million parameters and thus requires *ca.* **108.55 MB** of memory in full precision (*fp32*) or **54.28 MB** of memory in half precision (*fp16* or *bf16*). 
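These memory figures follow directly from the parameter count, at roughly 4 bytes per parameter in fp32 and 2 bytes in fp16/bf16; for example:

```python
params = 27.14e6                              # parameter count reported above
print(f"{params * 4 / 1e6:.2f} MB in fp32")   # ≈ 108.56 MB (4 bytes per parameter)
print(f"{params * 2 / 1e6:.2f} MB in fp16")   # ≈ 54.28 MB (2 bytes per parameter)
```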
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- |
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M |
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M |
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M |
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M |
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M |
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B |
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B |

where the following abbreviations are used:

| Abbreviation | Definition |
| ---- | ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformer block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-value projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*TensorFlow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-tiny-ff12000
eb6c4f28a81ec0bcaa1e6291114f8ee714374832
2022-02-15T10:51:18.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-tiny-ff12000
5
null
transformers
16,582
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-TINY-FF12000 (Deep-Narrow version) T5-Efficient-TINY-FF12000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-tiny-ff12000** - is of model type **Tiny** with the following variations: - **ff** is **12000** It has **61.72** million parameters and thus requires *ca.* **246.87 MB** of memory in full precision (*fp32*) or **123.44 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- |
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M |
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M |
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M |
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M |
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M |
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B |
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B |

where the following abbreviations are used:

| Abbreviation | Definition |
| ---- | ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformer block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-value projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*TensorFlow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-tiny-ff6000
8739514cfd39c08a0be3b0d4cdcb5c6ca3105c76
2022-02-15T10:51:21.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-tiny-ff6000
5
null
transformers
16,583
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-TINY-FF6000 (Deep-Narrow version) T5-Efficient-TINY-FF6000 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-tiny-ff6000** - is of model type **Tiny** with the following variations: - **ff** is **6000** It has **36.55** million parameters and thus requires *ca.* **146.21 MB** of memory in full precision (*fp32*) or **73.1 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- |
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M |
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M |
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M |
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M |
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M |
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B |
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B |

where the following abbreviations are used:

| Abbreviation | Definition |
| ---- | ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformer block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-value projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*TensorFlow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-tiny-nh16
6c9ce83d5c0e16309accc18c0c1e542f3a44db88
2022-02-15T10:51:25.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-tiny-nh16
5
null
transformers
16,584
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-TINY-NH16 (Deep-Narrow version) T5-Efficient-TINY-NH16 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-tiny-nh16** - is of model type **Tiny** with the following variations: - **nh** is **16** It has **25.02** million parameters and thus requires *ca.* **100.07 MB** of memory in full precision (*fp32*) or **50.04 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- |
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M |
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M |
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M |
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M |
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M |
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B |
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B |

where the following abbreviations are used:

| Abbreviation | Definition |
| ---- | ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformer block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-value projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*TensorFlow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
google/t5-efficient-xl-nl6
a70f782380f63c55f86a4e18834693151d7ff2f2
2022-02-15T10:51:56.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "en", "dataset:c4", "arxiv:2109.10686", "transformers", "deep-narrow", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-efficient-xl-nl6
5
null
transformers
16,585
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-XL-NL6 (Deep-Narrow version) T5-Efficient-XL-NL6 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-xl-nl6** - is of model type **Xl** with the following variations: - **nl** is **6** It has **737.59** million parameters and thus requires *ca.* **2950.37 MB** of memory in full precision (*fp32*) or **1475.18 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here:

| Model | nl (el/dl) | ff | dm | kv | nh | #Params |
| ---- | ---- | ---- | ---- | ---- | ---- | ---- |
| Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M |
| Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M |
| Small | 6/6 | 2048 | 512 | 32 | 8 | 60M |
| Base | 12/12 | 3072 | 768 | 64 | 12 | 220M |
| Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M |
| Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B |
| XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B |

where the following abbreviations are used:

| Abbreviation | Definition |
| ---- | ---- |
| nl | Number of transformer blocks (depth) |
| dm | Dimension of embedding vector (output vector of transformer block) |
| kv | Dimension of key/value projection matrix |
| nh | Number of attention heads |
| ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) |
| el | Number of transformer blocks in the encoder (encoder depth) |
| dl | Number of transformer blocks in the decoder (decoder depth) |
| sh | Signifies that attention heads are shared |
| skv | Signifies that key-value projection matrices are tied |

If a model checkpoint has no specific *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*.

## Pre-Training

The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective.

## Fine-Tuning

**Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks.

You can follow one of the following examples on how to fine-tune the model:

*PyTorch*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization)
- [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*TensorFlow*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

*JAX/Flax*:

- [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization)
- [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model.

## Downstream Performance

TODO: Add table if available

## Computational Complexity

TODO: Add table if available

## More information

We strongly recommend that the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
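Given the roughly 2.95 GB fp32 footprint quoted above, it can be convenient to load this variant directly in half precision; a minimal sketch, assuming a transformers version that supports the `torch_dtype` argument:

```python
import torch
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained(
    "google/t5-efficient-xl-nl6",
    torch_dtype=torch.bfloat16,   # roughly halves memory compared to fp32
)
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")
```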
google/t5-xxl-ssm-nqo
a3ec36bdb6c74ea3a2e5f5d326042b81930e6167
2020-12-07T08:44:07.000Z
[ "pytorch", "tf", "t5", "text2text-generation", "en", "dataset:c4", "dataset:wikipedia", "dataset:natural_questions", "arxiv:2002.08909", "arxiv:1910.10683", "transformers", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
google
null
google/t5-xxl-ssm-nqo
5
null
transformers
16,586
---
language: en
datasets:
- c4
- wikipedia
- natural_questions
license: apache-2.0
---

[Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) for **Closed Book Question Answering**.

The model was pre-trained using T5's denoising objective on [C4](https://huggingface.co/datasets/c4), subsequently additionally pre-trained using [REALM](https://arxiv.org/pdf/2002.08909.pdf)'s salient span masking objective on [Wikipedia](https://huggingface.co/datasets/wikipedia), and finally fine-tuned on [Natural Questions (NQ)](https://huggingface.co/datasets/natural_questions).

**Note**: The model was fine-tuned on 90% of the train split of [Natural Questions (NQ)](https://huggingface.co/datasets/natural_questions) for 20k steps and validated on the held-out 10% of the train split.

Other community checkpoints: [here](https://huggingface.co/models?search=ssm)

Paper: [How Much Knowledge Can You Pack Into the Parameters of a Language Model?](https://arxiv.org/abs/2002.08910)

Authors: *Adam Roberts, Colin Raffel, Noam Shazeer*

## Results on Natural Questions - Test Set

| Id | link | Exact Match |
|---|---|---|
| T5-large | https://huggingface.co/google/t5-large-ssm-nqo | 29.0 |
| **T5-xxl** | **https://huggingface.co/google/t5-xxl-ssm-nqo** | **35.2** |
| T5-3b | https://huggingface.co/google/t5-3b-ssm-nqo | 31.7 |
| T5-11b | https://huggingface.co/google/t5-11b-ssm-nqo | 34.8 |

## Usage

The model can be used as follows for **closed book question answering**:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

t5_qa_model = AutoModelForSeq2SeqLM.from_pretrained("google/t5-xxl-ssm-nqo")
t5_tok = AutoTokenizer.from_pretrained("google/t5-xxl-ssm-nqo")

input_ids = t5_tok("When was Franklin D. Roosevelt born?", return_tensors="pt").input_ids
gen_output = t5_qa_model.generate(input_ids)[0]

print(t5_tok.decode(gen_output, skip_special_tokens=True))
```

## Abstract

It has recently been observed that neural language models trained on unstructured text can implicitly store and retrieve knowledge using natural language queries. In this short paper, we measure the practical utility of this approach by fine-tuning pre-trained models to answer questions without access to any external context or knowledge. We show that this approach scales with model size and performs competitively with open-domain systems that explicitly retrieve answers from an external knowledge source when answering questions. To facilitate reproducibility and future work, we release our code and trained models at https://goo.gle/t5-cbqa.

![model image](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/how_much_know_ledge_image.png)
google/tapas-medium-masklm
0eb623259598f26f0c89c4ef167281498ce4e040
2021-11-29T14:20:32.000Z
[ "pytorch", "tf", "tapas", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
google
null
google/tapas-medium-masklm
5
null
transformers
16,587
This model corresponds to **tapas_masklm_medium_reset** of the [original repository](https://github.com/google-research/tapas).

Here's how you can use it:

```python
from transformers import TapasTokenizer, TapasForMaskedLM
import pandas as pd
import torch

tokenizer = TapasTokenizer.from_pretrained("google/tapas-medium-masklm")
model = TapasForMaskedLM.from_pretrained("google/tapas-medium-masklm")

data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        'Age': ["56", "45", "59"],
        'Number of movies': ["87", "53", "69"]
}
table = pd.DataFrame.from_dict(data)
query = "How many movies has Leonardo [MASK] Caprio played in?"

# prepare inputs
inputs = tokenizer(table=table, queries=query, padding="max_length", return_tensors="pt")

# forward pass
outputs = model(**inputs)

# return top 5 values and predictions
masked_index = torch.nonzero(inputs.input_ids.squeeze() == tokenizer.mask_token_id, as_tuple=False)
logits = outputs.logits[0, masked_index.item(), :]
probs = logits.softmax(dim=0)
values, predictions = probs.topk(5)

for value, pred in zip(values, predictions):
    print(f"{tokenizer.decode([pred])} with confidence {value}")
```
google/tapas-tiny
9310139ad2732e5f63ae2ca9bc5b5223fd6ba622
2021-11-29T10:01:08.000Z
[ "pytorch", "tf", "tapas", "feature-extraction", "en", "arxiv:2004.02349", "arxiv:2010.00571", "transformers", "TapasModel", "license:apache-2.0" ]
feature-extraction
false
google
null
google/tapas-tiny
5
null
transformers
16,588
---
language: en
tags:
- tapas
- TapasModel
license: apache-2.0
---

# TAPAS tiny model

This model has 2 versions which can be used. The latest version, which is the default one, corresponds to the `tapas_inter_masklm_tiny_reset` checkpoint of the [original Github repository](https://github.com/google-research/tapas).
This model was pre-trained on MLM and an additional step which the authors call intermediate pre-training. It uses relative position embeddings by default (i.e. resetting the position index at every cell of the table).

The other (non-default) version which can be used is the one with absolute position embeddings:
- `revision="no_reset"`, which corresponds to `tapas_inter_masklm_tiny`

Disclaimer: The team releasing TAPAS did not write a model card for this model so this model card has been written by the Hugging Face team and contributors.

## Model description

TAPAS is a BERT-like transformers model pretrained on a large corpus of English data from Wikipedia in a self-supervised fashion. This means it was pretrained on the raw tables and associated texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data), using an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives:

- Masked language modeling (MLM): taking a (flattened) table and associated context, the model randomly masks 15% of the words in the input, then runs the entire (partially masked) sequence through the model. The model then has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of a table and associated text.
- Intermediate pre-training: to encourage numerical reasoning on tables, the authors additionally pre-trained the model by creating a balanced dataset of millions of syntactically created training examples. Here, the model must predict (classify) whether a sentence is supported or refuted by the contents of a table. The training examples are created based on synthetic as well as counterfactual statements.

This way, the model learns an inner representation of the English language used in tables and associated texts, which can then be used to extract features useful for downstream tasks such as answering questions about a table, or determining whether a sentence is entailed or refuted by the contents of a table. Fine-tuning is done by adding one or more classification heads on top of the pre-trained model, and then jointly training these randomly initialized classification heads with the base model on a downstream task.

## Intended uses & limitations

You can use the raw model for getting hidden representations of table-question pairs, but it's mostly intended to be fine-tuned on a downstream task such as question answering or sequence classification. See the [model hub](https://huggingface.co/models?filter=tapas) to look for fine-tuned versions on a task that interests you.

## Training procedure

### Preprocessing

The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are then of the form:

```
[CLS] Sentence [SEP] Flattened table [SEP]
```

### Pre-training

The model was pre-trained on 32 Cloud TPU v3 cores for 1,000,000 steps with maximum sequence length 512 and batch size of 512.
In this setup, pre-training on MLM only takes around 3 days. Additionally, the model has been further pre-trained on a second task (table entailment). See the original TAPAS [paper](https://www.aclweb.org/anthology/2020.acl-main.398/) and the [follow-up paper](https://www.aclweb.org/anthology/2020.findings-emnlp.27/) for more details.

The optimizer used is Adam with a learning rate of 5e-5, and a warmup ratio of 0.01.

### BibTeX entry and citation info

```bibtex
@misc{herzig2020tapas,
      title={TAPAS: Weakly Supervised Table Parsing via Pre-training},
      author={Jonathan Herzig and Paweł Krzysztof Nowak and Thomas Müller and Francesco Piccinno and Julian Martin Eisenschlos},
      year={2020},
      eprint={2004.02349},
      archivePrefix={arXiv},
      primaryClass={cs.IR}
}
```

```bibtex
@misc{eisenschlos2020understanding,
      title={Understanding tables with intermediate pre-training},
      author={Julian Martin Eisenschlos and Syrine Krichene and Thomas Müller},
      year={2020},
      eprint={2010.00571},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
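To make the feature-extraction use case above concrete, a minimal sketch with `TapasModel` (TAPAS additionally requires the `torch-scatter` package; the table and query below are illustrative):

```python
import pandas as pd
from transformers import TapasTokenizer, TapasModel

tokenizer = TapasTokenizer.from_pretrained("google/tapas-tiny")
model = TapasModel.from_pretrained("google/tapas-tiny")

table = pd.DataFrame({"Actors": ["Brad Pitt", "George Clooney"], "Age": ["56", "59"]})
inputs = tokenizer(table=table, queries=["How old is George Clooney?"],
                   padding="max_length", return_tensors="pt")

outputs = model(**inputs)

# [CLS] embedding as a fixed-size hidden representation of the table-question pair
cls_embedding = outputs.last_hidden_state[:, 0, :]
print(cls_embedding.shape)
```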
goumbalamm/EsperBERTo
c6e2cd8fa0ccfb1d52303ddc6b75dd35b88798d4
2021-05-20T16:34:49.000Z
[ "pytorch", "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
goumbalamm
null
goumbalamm/EsperBERTo
5
null
transformers
16,589
Entry not found
hadxu/distilbert-base-uncased-finetuned-clinc
f74ac603efe31c95fa95e4a536530ac3d44a31b9
2022-02-13T04:05:09.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:clinc_oos", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
hadxu
null
hadxu/distilbert-base-uncased-finetuned-clinc
5
null
transformers
16,590
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- clinc_oos
metrics:
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-clinc
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: clinc_oos
      type: clinc_oos
      args: plus
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9116129032258065
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-clinc

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7730
- Accuracy: 0.9116

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 48
- eval_batch_size: 48
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 4.3183        | 1.0   | 318  | 3.3075          | 0.7416   |
| 2.633         | 2.0   | 636  | 1.8792          | 0.8384   |
| 1.5339        | 3.0   | 954  | 1.1514          | 0.8939   |
| 1.0038        | 4.0   | 1272 | 0.8567          | 0.9077   |
| 0.7868        | 5.0   | 1590 | 0.7730          | 0.9116   |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
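For inference, a minimal sketch using the `text-classification` pipeline (the utterance is illustrative; the predicted label corresponds to one of the CLINC intent classes):

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="hadxu/distilbert-base-uncased-finetuned-clinc",
)
print(classifier("please set an alarm for seven in the morning"))
```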
hanseokhyeon/bert-badword-large
ef37b4057cb44c4d66082bb2a54a264eb13f0a65
2021-05-19T18:02:17.000Z
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
false
hanseokhyeon
null
hanseokhyeon/bert-badword-large
5
null
transformers
16,591
Entry not found
hanseokhyeon/bert-badword-puri-000
f6cf61bd263bb7cdd377a32139f0a4eed516ebc8
2021-05-19T18:04:48.000Z
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
false
hanseokhyeon
null
hanseokhyeon/bert-badword-puri-000
5
null
transformers
16,592
Entry not found
hanseokhyeon/bert-badword-puri
2d2dac75ff184bf6658e4fbd6dd114dd4c66a93a
2021-05-19T18:13:40.000Z
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
false
hanseokhyeon
null
hanseokhyeon/bert-badword-puri
5
null
transformers
16,593
Entry not found
harish/BERTBaseClone-10000-6000000
5c8815f5a8a7088992f7270a613686853d74dcb8
2021-05-19T18:23:26.000Z
[ "pytorch", "jax", "bert", "transformers" ]
null
false
harish
null
harish/BERTBaseClone-10000-6000000
5
null
transformers
16,594
Entry not found
harish/EN-AStitchTask1A-DistilBERT-FalseTrue-0-2-BEST
50d163528ed33e4f1e8b69532a1b4ade2ec9b11d
2021-09-05T00:26:05.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
harish
null
harish/EN-AStitchTask1A-DistilBERT-FalseTrue-0-2-BEST
5
null
transformers
16,595
Entry not found
harish/EN-AStitchTask1A-XLNet-FalseFalse-0-FewShot-4-BEST
dd11e4cd98053e5318f75767062b925a4988bcb8
2021-09-05T00:42:26.000Z
[ "pytorch", "xlnet", "text-classification", "transformers" ]
text-classification
false
harish
null
harish/EN-AStitchTask1A-XLNet-FalseFalse-0-FewShot-4-BEST
5
null
transformers
16,596
Entry not found
harish/EN-AStitchTask1A-XLNet-FalseFalse-0-OneShot-0-BEST
f007219b7925b7d5ba17cefcc8bf013ca6d57a9e
2021-09-05T00:40:18.000Z
[ "pytorch", "xlnet", "text-classification", "transformers" ]
text-classification
false
harish
null
harish/EN-AStitchTask1A-XLNet-FalseFalse-0-OneShot-0-BEST
5
null
transformers
16,597
Entry not found
harish/EN-AStitchTask1A-XLNet-FalseTrue-0-FewShot-0-BEST
afaced1f93006968bdcc74ab488161fcbcaf53ee
2021-09-05T00:29:47.000Z
[ "pytorch", "xlnet", "text-classification", "transformers" ]
text-classification
false
harish
null
harish/EN-AStitchTask1A-XLNet-FalseTrue-0-FewShot-0-BEST
5
null
transformers
16,598
Entry not found
harish/PT-UP-xlmR-ContextIncluded_IdiomExcluded-OneShot-4_BEST
9e1e1208e0968a8e87ab19fc4b77cb6f89725e06
2021-08-30T02:00:11.000Z
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
false
harish
null
harish/PT-UP-xlmR-ContextIncluded_IdiomExcluded-OneShot-4_BEST
5
null
transformers
16,599
Entry not found