Dataset schema (one row per model; observed lengths, ranges, and class counts from the dump):

| Column | Type | Observed range / classes |
|---|---|---|
| `modelId` | string | 4–112 chars |
| `sha` | string | 40 chars |
| `lastModified` | string | 24 chars |
| `tags` | sequence | n/a |
| `pipeline_tag` | string | 29 classes |
| `private` | bool | 1 class |
| `author` | string | 2–38 chars |
| `config` | null | n/a |
| `id` | string | 4–112 chars |
| `downloads` | float64 | 0–36.8M |
| `likes` | float64 | 0–712 |
| `library_name` | string | 17 classes |
| `__index_level_0__` | int64 | 0–38.5k |
| `readme` | string | 0–186k chars |
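The rows below are Hugging Face Hub model metadata. A minimal sketch of loading and filtering such a dump with pandas, assuming it has been exported to a Parquet file; the filename `models.parquet` is a placeholder, not part of the dataset:

```python
import pandas as pd

# Load the dump; "models.parquet" is a hypothetical export of this dataset.
df = pd.read_parquet("models.parquet")

# Keep question-answering models that actually ship a model card
# ("Entry not found" marks rows whose README is missing).
qa = df[(df["pipeline_tag"] == "question-answering") & (df["readme"] != "Entry not found")]

# Rank by popularity using the downloads column.
print(qa.sort_values("downloads", ascending=False)[["modelId", "downloads", "likes"]].head())
```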
eldadshulman/distilbert-base-uncased-finetuned-squad
7d506eb577518a171c31a490d29bbcf0c873ef39
2022-05-22T15:44:07.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
eldadshulman
null
eldadshulman/distilbert-base-uncased-finetuned-squad
1
null
transformers
32,100
Entry not found
stevemobs/deberta-base-finetuned-squad1
0f5f8f653d34e92056c2bdad38bfd4b5397ada47
2022-05-22T19:54:06.000Z
[ "pytorch", "tensorboard", "deberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
stevemobs
null
stevemobs/deberta-base-finetuned-squad1
1
null
transformers
32,101
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: deberta-base-finetuned-squad1
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# deberta-base-finetuned-squad1

This model is a fine-tuned version of [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) on the squad dataset. It achieves the following results on the evaluation set:
- Loss: 0.8037

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 12
- eval_batch_size: 12
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 0.7928 | 1.0 | 7380 | 0.7810 |
| 0.5795 | 2.0 | 14760 | 0.8037 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
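Loading a checkpoint like the one above for inference follows the standard `transformers` question-answering pipeline; a minimal sketch, where the question and context strings are purely illustrative:

```python
from transformers import pipeline

# Standard extractive QA pipeline for the checkpoint described above.
qa = pipeline("question-answering", model="stevemobs/deberta-base-finetuned-squad1")

# Illustrative SQuAD-style inputs.
result = qa(
    question="What dataset was the model fine-tuned on?",
    context="deberta-base-finetuned-squad1 is a fine-tuned version of microsoft/deberta-base on the squad dataset.",
)
print(result["answer"], result["score"])
```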
Sangita/distilbert-base-uncased-finetuned-squad
da6e92af86592965fdd30c3377b85c01f50f0045
2022-05-22T16:42:57.000Z
[ "pytorch", "distilbert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Sangita
null
Sangita/distilbert-base-uncased-finetuned-squad
1
null
transformers
32,102
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-squad

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
Diegomejia/bert-ucb-3
df097e2487a473933600aba2a90031c0c1ef22eb
2022-05-22T17:34:54.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
Diegomejia
null
Diegomejia/bert-ucb-3
1
null
transformers
32,103
Entry not found
chrisvinsen/wav2vec2-5
bca3039202a27971dcda8c21bd69ec5690f581f0
2022-05-22T21:32:42.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
chrisvinsen
null
chrisvinsen/wav2vec2-5
1
null
transformers
32,104
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-5
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-5

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 3.0700
- Wer: 1.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.003
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 400
- num_epochs: 30

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:---:|
| 3.4082 | 1.37 | 200 | 3.3181 | 1.0 |
| 2.8798 | 2.74 | 400 | 2.9921 | 1.0 |
| 2.8703 | 4.11 | 600 | 3.1937 | 1.0 |
| 2.8643 | 5.48 | 800 | 3.0304 | 1.0 |
| 2.8655 | 6.85 | 1000 | 3.0321 | 1.0 |
| 2.8655 | 8.22 | 1200 | 3.0716 | 1.0 |
| 2.863 | 9.59 | 1400 | 3.1764 | 1.0 |
| 2.8567 | 10.96 | 1600 | 3.0600 | 1.0 |
| 2.861 | 12.33 | 1800 | 3.1761 | 1.0 |
| 2.8606 | 13.7 | 2000 | 3.1028 | 1.0 |
| 2.8613 | 15.07 | 2200 | 3.2119 | 1.0 |
| 2.8612 | 16.44 | 2400 | 3.1158 | 1.0 |
| 2.8603 | 17.81 | 2600 | 3.1230 | 1.0 |
| 2.8601 | 19.18 | 2800 | 3.0380 | 1.0 |
| 2.856 | 20.55 | 3000 | 3.0729 | 1.0 |
| 2.8557 | 21.92 | 3200 | 3.0511 | 1.0 |
| 2.8556 | 23.29 | 3400 | 3.0710 | 1.0 |
| 2.8552 | 24.66 | 3600 | 3.1364 | 1.0 |
| 2.8574 | 26.03 | 3800 | 3.0104 | 1.0 |
| 2.8543 | 27.4 | 4000 | 3.1068 | 1.0 |
| 2.8558 | 28.77 | 4200 | 3.0700 | 1.0 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
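The wav2vec2 cards in this dump share the same auto-generated template; a minimal transcription sketch for such a checkpoint, assuming a local recording (`audio.wav` is a placeholder path, and decoding a file path requires ffmpeg). Note that this particular run reports a WER of 1.0, i.e. it did not learn to transcribe:

```python
from transformers import pipeline

# CTC-based speech recognition; the pipeline handles feature extraction and decoding.
asr = pipeline("automatic-speech-recognition", model="chrisvinsen/wav2vec2-5")

# "audio.wav" is a placeholder path to a local recording.
print(asr("audio.wav")["text"])
```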
krotima1/mbart-ht2a-c
1e5103eff3e9bac2d4a2bf90eb6ee6fd635093fd
2022-05-23T20:37:09.000Z
[ "pytorch", "mbart", "text2text-generation", "cs", "dataset:private CNC dataset news-based", "transformers", "abstractive summarization", "mbart-cc25", "Czech", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
krotima1
null
krotima1/mbart-ht2a-c
1
null
transformers
32,105
---
language:
- cs
tags:
- abstractive summarization
- mbart-cc25
- Czech
license: apache-2.0
datasets:
- private CNC dataset news-based
metrics:
- rouge
- rougeraw
---

# mBART fine-tuned model for Czech abstractive summarization (HT2A-C)

This model is a fine-tuned checkpoint of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on the Czech news dataset to produce Czech abstractive summaries.

## Task

The model deals with the task ``Headline + Text to Abstract`` (HT2A), which consists of generating a multi-sentence summary, treated as an abstract, from a Czech news text.

## Dataset

The model has been trained on the private CNC dataset provided by Czech News Center. The dataset includes 3/4M (roughly 750K) Czech news-based documents consisting of Headline, Abstract, and Full-text sections. Truncation and padding were set to 512 tokens for the encoder and 128 for the decoder.

## Training

The model has been trained on 1x NVIDIA Tesla A100 40GB for 60 hours. During training, the model saw 3712K documents, corresponding to roughly 5.5 epochs.

# Use

Assuming you are using the provided Summarizer.ipynb file:

```python
# Imports added for completeness; the Summarizer class itself
# is defined in the accompanying Summarizer.ipynb.
from collections import OrderedDict
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

def summ_config():
    cfg = OrderedDict([
        # summarization model - checkpoint from website
        ("model_name", "krotima1/mbart-ht2a-c"),
        ("inference_cfg", OrderedDict([
            ("num_beams", 4),
            ("top_k", 40),
            ("top_p", 0.92),
            ("do_sample", True),
            ("temperature", 0.89),
            ("repetition_penalty", 1.2),
            ("no_repeat_ngram_size", None),
            ("early_stopping", True),
            ("max_length", 128),
            ("min_length", 10),
        ])),
        # texts to summarize
        ("text", [
            "Input your Czech text",
        ]),
    ])
    return cfg

cfg = summ_config()

# load model
model = AutoModelForSeq2SeqLM.from_pretrained(cfg["model_name"])
tokenizer = AutoTokenizer.from_pretrained(cfg["model_name"])

# init summarizer
summarize = Summarizer(model, tokenizer, cfg["inference_cfg"])
summarize(cfg["text"])
```
stevemobs/deberta-base-finetuned-squad1-aqa
bff4a8b384ccc9f3977ebd9d714c34fe0690fc33
2022-05-22T22:10:52.000Z
[ "pytorch", "tensorboard", "deberta", "question-answering", "dataset:adversarial_qa", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
stevemobs
null
stevemobs/deberta-base-finetuned-squad1-aqa
1
null
transformers
32,106
---
license: mit
tags:
- generated_from_trainer
datasets:
- adversarial_qa
model-index:
- name: deberta-base-finetuned-squad1-aqa
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# deberta-base-finetuned-squad1-aqa

This model is a fine-tuned version of [stevemobs/deberta-base-finetuned-squad1](https://huggingface.co/stevemobs/deberta-base-finetuned-squad1) on the adversarial_qa dataset. It achieves the following results on the evaluation set:
- Loss: 1.5912

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 12
- eval_batch_size: 12
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.9115 | 1.0 | 2527 | 1.5572 |
| 1.3429 | 2.0 | 5054 | 1.5912 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
prodm93/gpt2_rn_ep2_model
aae7e0e9f74e2db128c8a5ff0d96c77f6639e5cb
2022-05-22T21:07:05.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
prodm93
null
prodm93/gpt2_rn_ep2_model
1
null
transformers
32,107
Entry not found
globuslabs/ScholarBERT_1
fe600127108bb3de1f4f9f32ff6235d28510831e
2022-05-24T03:16:16.000Z
[ "pytorch", "bert", "fill-mask", "en", "arxiv:2205.11342", "transformers", "science", "multi-displinary", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
false
globuslabs
null
globuslabs/ScholarBERT_1
1
null
transformers
32,108
---
language: en
tags:
- science
- multi-displinary
license: apache-2.0
---

# ScholarBERT_1 Model

This is the **ScholarBERT_1** variant of the ScholarBERT model family.

The model is pretrained on a large collection of scientific research articles (**2.2B tokens**).

This is a **cased** (case-sensitive) model. The tokenizer will not convert all inputs to lower-case by default.

The model is based on the same architecture as [BERT-large](https://huggingface.co/bert-large-cased) and has a total of 340M parameters.

# Model Architecture

| Hyperparameter | Value |
|-----------------|:-------:|
| Layers | 24 |
| Hidden Size | 1024 |
| Attention Heads | 16 |
| Total Parameters | 340M |

# Training Dataset

The vocab and the model are pretrained on **1% of the PRD** scientific literature dataset.

The PRD dataset is provided by Public.Resource.Org, Inc. (“Public Resource”), a nonprofit organization based in California.

This dataset was constructed from a corpus of journal article files, from which we successfully extracted the text of 75,496,055 articles from 178,928 journals. The articles span Arts & Humanities, Life Sciences & Biomedicine, Physical Sciences, Social Sciences, and Technology. The distribution of articles is shown below.

![corpus pie chart](https://huggingface.co/globuslabs/ScholarBERT/resolve/main/corpus_pie_chart.png)

# BibTeX entry and citation info

If using this model, please cite this paper:

```
@misc{hong2022scholarbert,
  doi = {10.48550/ARXIV.2205.11342},
  url = {https://arxiv.org/abs/2205.11342},
  author = {Hong, Zhi and Ajith, Aswathy and Pauloski, Gregory and Duede, Eamon and Malamud, Carl and Magoulas, Roger and Chard, Kyle and Foster, Ian},
  title = {ScholarBERT: Bigger is Not Always Better},
  publisher = {arXiv},
  year = {2022}
}
```
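A minimal fill-mask sketch for this checkpoint; the input sentence is illustrative, and since ScholarBERT is BERT-based its mask token is `[MASK]`:

```python
from transformers import pipeline

# BERT-style models use the [MASK] token for fill-mask.
unmasker = pipeline("fill-mask", model="globuslabs/ScholarBERT_1")

# Illustrative scientific-domain sentence.
for pred in unmasker("The enzyme catalyzes the [MASK] of the substrate."):
    print(pred["token_str"], round(pred["score"], 3))
```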
chrisvinsen/wav2vec2-6
89af540e2c3cdebd98cc3f7cd0aac56719113f5c
2022-05-23T03:36:25.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
chrisvinsen
null
chrisvinsen/wav2vec2-6
1
null
transformers
32,109
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-6
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-6

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 5.2459
- Wer: 1.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.003
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 400
- num_epochs: 30

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:---:|
| 4.5873 | 1.56 | 200 | 5.4586 | 1.0 |
| 4.1846 | 3.12 | 400 | 5.2278 | 1.0 |
| 4.1711 | 4.69 | 600 | 5.3131 | 1.0 |
| 4.1581 | 6.25 | 800 | 5.2558 | 1.0 |
| 4.1275 | 7.81 | 1000 | 5.2556 | 1.0 |
| 4.1452 | 9.38 | 1200 | 5.2637 | 1.0 |
| 4.1614 | 10.94 | 1400 | 5.2847 | 1.0 |
| 4.1667 | 12.5 | 1600 | 5.2349 | 1.0 |
| 4.1471 | 14.06 | 1800 | 5.2850 | 1.0 |
| 4.1268 | 15.62 | 2000 | 5.2510 | 1.0 |
| 4.1701 | 17.19 | 2200 | 5.2605 | 1.0 |
| 4.1459 | 18.75 | 2400 | 5.2493 | 1.0 |
| 4.1411 | 20.31 | 2600 | 5.2649 | 1.0 |
| 4.1351 | 21.88 | 2800 | 5.2541 | 1.0 |
| 4.1442 | 23.44 | 3000 | 5.2459 | 1.0 |
| 4.1805 | 25.0 | 3200 | 5.2232 | 1.0 |
| 4.1262 | 26.56 | 3400 | 5.2384 | 1.0 |
| 4.145 | 28.12 | 3600 | 5.2522 | 1.0 |
| 4.142 | 29.69 | 3800 | 5.2459 | 1.0 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
Diegomejia/bert-ucb-4
4e80d217bfcd36f054e5f412dc607800724007d5
2022-05-23T01:11:03.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
Diegomejia
null
Diegomejia/bert-ucb-4
1
null
transformers
32,110
Entry not found
Dizzykong/Gusteau
b32164f5366dd90719d84803148aff1cbe0edae9
2022-05-23T06:26:23.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "model-index" ]
text-generation
false
Dizzykong
null
Dizzykong/Gusteau
1
null
transformers
32,111
---
tags:
- generated_from_trainer
model-index:
- name: Gusteau
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Gusteau

This model is a fine-tuned version of [gpt2-medium](https://huggingface.co/gpt2-medium) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 2
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 0.16

### Training results

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
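A minimal generation sketch for a causal-LM fine-tune like the one above; the prompt and sampling settings are illustrative, and given the card's 0.16 training epochs the output quality is untested:

```python
from transformers import pipeline

# Generic causal-LM sampling; per the card above, Gusteau is a gpt2-medium fine-tune.
generator = pipeline("text-generation", model="Dizzykong/Gusteau")

# Illustrative prompt and sampling settings.
print(generator("Once upon a time", max_length=40, do_sample=True, top_p=0.9)[0]["generated_text"])
```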
Splend1dchan/wav2vec2-large-lv60_t5lephone-small_lnalrdiff_bs64
d18ad040648cc23023e24ba5f9fe7d0a6611bc12
2022-05-23T08:37:55.000Z
[ "pytorch", "speechmix", "transformers" ]
null
false
Splend1dchan
null
Splend1dchan/wav2vec2-large-lv60_t5lephone-small_lnalrdiff_bs64
1
null
transformers
32,112
Entry not found
PSW/samsum_percent1_maxsimdel
926837524d90fa87b73f38d832a35804520f09e3
2022-05-23T02:23:43.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent1_maxsimdel
1
null
transformers
32,113
Entry not found
PSW/samsum_percent1_minsimins
bfc33b9cfc7bde57c2eb5fdb1c0b347ed7326289
2022-05-23T02:34:32.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent1_minsimins
1
null
transformers
32,114
Entry not found
PSW/samsum_percent10_maxsimdel
290b2e3dee9750cccb3a008326f9a50d4dbd5a02
2022-05-23T02:48:50.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent10_maxsimdel
1
null
transformers
32,115
Entry not found
Dulu/wav2vec2-xlsr-mn-eng-v0
a3c7dd5ddb9e41f297efff9ef998a92b759c5698
2022-05-24T19:27:25.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Dulu
null
Dulu/wav2vec2-xlsr-mn-eng-v0
1
null
transformers
32,116
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-xlsr-mn-eng
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-xlsr-mn-eng

This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the IEMOCAP and Common Voice MN datasets. It can be used to recognize English (ENG) and Mongolian (MN) speech simultaneously. It achieves the following results on the evaluation set:
- Loss: 0.3087
- Wer: 0.3402

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 5
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 8.8609 | 0.08 | 500 | 3.6078 | 1.0 |
| 3.5494 | 0.15 | 1000 | 3.2044 | 1.0 |
| 3.1699 | 0.23 | 1500 | 3.1560 | 1.0 |
| 3.0955 | 0.3 | 2000 | 3.1087 | 1.0 |
| 2.7918 | 0.38 | 2500 | 2.1146 | 1.0236 |
| 2.0528 | 0.45 | 3000 | 1.4938 | 0.9648 |
| 1.6329 | 0.53 | 3500 | 1.2614 | 0.9198 |
| 1.3932 | 0.6 | 4000 | 1.0504 | 0.8314 |
| 1.2652 | 0.68 | 4500 | 0.9664 | 0.7809 |
| 1.1829 | 0.76 | 5000 | 0.8999 | 0.7381 |
| 1.1674 | 0.83 | 5500 | 0.8200 | 0.6924 |
| 1.0599 | 0.91 | 6000 | 0.7713 | 0.6729 |
| 1.027 | 0.98 | 6500 | 0.7714 | 0.6616 |
| 0.9289 | 1.06 | 7000 | 0.7571 | 0.6433 |
| 0.9192 | 1.13 | 7500 | 0.6899 | 0.6151 |
| 0.8996 | 1.21 | 8000 | 0.7012 | 0.6104 |
| 0.9281 | 1.28 | 8500 | 0.6452 | 0.5914 |
| 0.8656 | 1.36 | 9000 | 0.6162 | 0.5781 |
| 0.8635 | 1.44 | 9500 | 0.6249 | 0.5672 |
| 0.8388 | 1.51 | 10000 | 0.5936 | 0.5558 |
| 0.8087 | 1.59 | 10500 | 0.5844 | 0.5466 |
| 0.7755 | 1.66 | 11000 | 0.5838 | 0.5364 |
| 0.8377 | 1.74 | 11500 | 0.5358 | 0.5202 |
| 0.8308 | 1.81 | 12000 | 0.5333 | 0.5196 |
| 0.7775 | 1.89 | 12500 | 0.5129 | 0.5060 |
| 0.7747 | 1.96 | 13000 | 0.5164 | 0.5096 |
| 0.7115 | 2.04 | 13500 | 0.5056 | 0.4936 |
| 0.6974 | 2.12 | 14000 | 0.4925 | 0.4878 |
| 0.6672 | 2.19 | 14500 | 0.5030 | 0.4908 |
| 0.6396 | 2.27 | 15000 | 0.4821 | 0.4686 |
| 0.6943 | 2.34 | 15500 | 0.4693 | 0.4624 |
| 0.6413 | 2.42 | 16000 | 0.4626 | 0.4636 |
| 0.6446 | 2.49 | 16500 | 0.4513 | 0.4609 |
| 0.6338 | 2.57 | 17000 | 0.4386 | 0.4524 |
| 0.6208 | 2.65 | 17500 | 0.4360 | 0.4445 |
| 0.6397 | 2.72 | 18000 | 0.4348 | 0.4355 |
| 0.6127 | 2.8 | 18500 | 0.4367 | 0.4318 |
| 0.5956 | 2.87 | 19000 | 0.4376 | 0.4322 |
| 0.6345 | 2.95 | 19500 | 0.4050 | 0.4308 |
| 0.572 | 3.02 | 20000 | 0.4211 | 0.4219 |
| 0.5447 | 3.1 | 20500 | 0.4042 | 0.4112 |
| 0.5323 | 3.17 | 21000 | 0.4101 | 0.4153 |
| 0.5677 | 3.25 | 21500 | 0.3952 | 0.4188 |
| 0.5354 | 3.33 | 22000 | 0.3889 | 0.4007 |
| 0.5297 | 3.4 | 22500 | 0.3793 | 0.3997 |
| 0.5314 | 3.48 | 23000 | 0.3684 | 0.3956 |
| 0.5217 | 3.55 | 23500 | 0.3572 | 0.3853 |
| 0.5224 | 3.63 | 24000 | 0.3535 | 0.3867 |
| 0.4983 | 3.7 | 24500 | 0.3636 | 0.3804 |
| 0.5355 | 3.78 | 25000 | 0.3680 | 0.3770 |
| 0.5115 | 3.85 | 25500 | 0.3472 | 0.3752 |
| 0.5416 | 3.93 | 26000 | 0.3280 | 0.3689 |
| 0.5104 | 4.01 | 26500 | 0.3319 | 0.3650 |
| 0.4524 | 4.08 | 27000 | 0.3453 | 0.3632 |
| 0.462 | 4.16 | 27500 | 0.3359 | 0.3600 |
| 0.4823 | 4.23 | 28000 | 0.3268 | 0.3553 |
| 0.4671 | 4.31 | 28500 | 0.3248 | 0.3535 |
| 0.4702 | 4.38 | 29000 | 0.3278 | 0.3501 |
| 0.483 | 4.46 | 29500 | 0.3183 | 0.3492 |
| 0.4232 | 4.53 | 30000 | 0.3224 | 0.3470 |
| 0.4227 | 4.61 | 30500 | 0.3171 | 0.3458 |
| 0.4687 | 4.69 | 31000 | 0.3121 | 0.3537 |
| 0.4486 | 4.76 | 31500 | 0.3088 | 0.3424 |
| 0.4459 | 4.84 | 32000 | 0.3101 | 0.3407 |
| 0.4513 | 4.91 | 32500 | 0.3077 | 0.3407 |
| 0.4237 | 4.99 | 33000 | 0.3087 | 0.3402 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
PSW/samsum_percent20_maxsimdel
28f865f6037ca50e410b05551a7d378a0aa7beaa
2022-05-23T03:22:15.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent20_maxsimdel
1
null
transformers
32,117
Entry not found
PSW/samsum_percent20_minsimins
fff29403b875fb96c445cd39d4bb92b8076c1857
2022-05-23T03:40:32.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent20_minsimins
1
null
transformers
32,118
Entry not found
chrisvinsen/wav2vec2-7
f76d0fd94529c300ae257501586b54566d1f6e65
2022-05-23T08:09:15.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
chrisvinsen
null
chrisvinsen/wav2vec2-7
1
null
transformers
32,119
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-7
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-7

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 0.6017
- Wer: 0.5200

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 400
- num_epochs: 30

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 5.1311 | 1.56 | 200 | 2.9839 | 1.0 |
| 2.5727 | 3.12 | 400 | 1.4962 | 1.0209 |
| 1.0187 | 4.69 | 600 | 0.7562 | 0.7859 |
| 0.637 | 6.25 | 800 | 0.6529 | 0.6960 |
| 0.4847 | 7.81 | 1000 | 0.6609 | 0.6745 |
| 0.3952 | 9.38 | 1200 | 0.5808 | 0.6220 |
| 0.3343 | 10.94 | 1400 | 0.5622 | 0.6004 |
| 0.2897 | 12.5 | 1600 | 0.8842 | 0.5980 |
| 0.2549 | 14.06 | 1800 | 0.6047 | 0.5765 |
| 0.2334 | 15.62 | 2000 | 0.6436 | 0.5699 |
| 0.2144 | 17.19 | 2200 | 0.5831 | 0.5593 |
| 0.1982 | 18.75 | 2400 | 0.6327 | 0.5620 |
| 0.1817 | 20.31 | 2600 | 0.8790 | 0.5456 |
| 0.1713 | 21.88 | 2800 | 0.9603 | 0.5362 |
| 0.163 | 23.44 | 3000 | 0.5940 | 0.5384 |
| 0.1539 | 25.0 | 3200 | 0.6058 | 0.5311 |
| 0.1392 | 26.56 | 3400 | 0.6131 | 0.5221 |
| 0.1386 | 28.12 | 3600 | 0.6066 | 0.5258 |
| 0.1351 | 29.69 | 3800 | 0.6017 | 0.5200 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
PSW/samsum_percent1_minsimdel
407133c0bc339f2056bba3f6d34f4f5d18e5c160
2022-05-23T05:19:21.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent1_minsimdel
1
null
transformers
32,120
Entry not found
PSW/samsum_percent1_randomdel
a410c4e0bd6f4543bf1bb927eeb41da65f2099ba
2022-05-23T05:30:35.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent1_randomdel
1
null
transformers
32,121
Entry not found
PSW/samsum_percent1_maxsimins
107dc3411830920ba6210eb56ff05733458b64bb
2022-05-23T05:40:48.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent1_maxsimins
1
null
transformers
32,122
Entry not found
PSW/samsum_percent1_randomins
1beb50d949e476bb0f6c9dc7eec283470a45a40d
2022-05-23T05:51:08.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent1_randomins
1
null
transformers
32,123
Entry not found
PSW/samsum_percent10_randomdel
dbaa432cc6f67535e126058fdd382a505f37a9d8
2022-05-23T06:19:11.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent10_randomdel
1
null
transformers
32,124
Entry not found
PSW/samsum_percent10_randomins
593205223489e2a1bd32b2fb1ddf93b96cfb4c89
2022-05-23T06:49:48.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent10_randomins
1
null
transformers
32,125
Entry not found
mriggs/tgb_epoch_1
f306b58a087c434a5ff62a28978119ed1060c09d
2022-05-23T06:50:47.000Z
[ "pytorch", "flaubert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
mriggs
null
mriggs/tgb_epoch_1
1
null
transformers
32,126
Entry not found
PSW/samsum_percent20_minsimdel
6dc06404f1f9796e93978bb1313b844434ef8ed5
2022-05-23T07:11:31.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent20_minsimdel
1
null
transformers
32,127
Entry not found
PSW/samsum_percent20_randomdel
ed574c670684299340d756e9ed1c58f49578024e
2022-05-23T07:29:52.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent20_randomdel
1
null
transformers
32,128
Entry not found
PSW/samsum_percent20_maxsimins
3f4b1221ac56955f7933c04bf23843aeba1b0dc3
2022-05-23T07:49:29.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent20_maxsimins
1
null
transformers
32,129
Entry not found
PSW/samsum_percent20_randomins
55246743200ab79dad510833bd8dcea66db827e5
2022-05-23T08:10:15.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_percent20_randomins
1
null
transformers
32,130
Entry not found
chrisvinsen/wav2vec2-8
1d7e0119d2e1337863b839b361fefcd546525e67
2022-05-23T10:57:42.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
chrisvinsen
null
chrisvinsen/wav2vec2-8
1
null
transformers
32,131
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-8
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-8

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 3.1169
- Wer: 1.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0006
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 400
- num_epochs: 30

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:---:|
| 3.9398 | 1.56 | 200 | 3.1250 | 1.0 |
| 2.8703 | 3.12 | 400 | 3.1608 | 1.0 |
| 2.8632 | 4.69 | 600 | 3.1329 | 1.0 |
| 2.8638 | 6.25 | 800 | 3.0795 | 1.0 |
| 2.8595 | 7.81 | 1000 | 3.1410 | 1.0 |
| 2.8611 | 9.38 | 1200 | 3.0952 | 1.0 |
| 2.861 | 10.94 | 1400 | 3.1391 | 1.0 |
| 2.8603 | 12.5 | 1600 | 3.0639 | 1.0 |
| 2.8568 | 14.06 | 1800 | 3.1180 | 1.0 |
| 2.8563 | 15.62 | 2000 | 3.1170 | 1.0 |
| 2.857 | 17.19 | 2200 | 3.0846 | 1.0 |
| 2.8574 | 18.75 | 2400 | 3.0740 | 1.0 |
| 2.8543 | 20.31 | 2600 | 3.1482 | 1.0 |
| 2.8567 | 21.88 | 2800 | 3.1604 | 1.0 |
| 2.8561 | 23.44 | 3000 | 3.1055 | 1.0 |
| 2.858 | 25.0 | 3200 | 3.0669 | 1.0 |
| 2.8524 | 26.56 | 3400 | 3.0992 | 1.0 |
| 2.8557 | 28.12 | 3600 | 3.1050 | 1.0 |
| 2.8527 | 29.69 | 3800 | 3.1169 | 1.0 |

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
hamidov02/wav2vec2-large-xls-r-300m-hungarian-colab
fbd5e3292b7d60dbacf7894284e276aa76d09fba
2022-05-23T13:18:12.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
hamidov02
null
hamidov02/wav2vec2-large-xls-r-300m-hungarian-colab
1
null
transformers
32,132
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xls-r-300m-hungarian-colab
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-large-xls-r-300m-hungarian-colab

This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set:
- Loss: 0.6404
- Wer: 0.4662

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.4833 | 4.0 | 400 | 0.6493 | 0.6491 |
| 0.2282 | 8.0 | 800 | 0.6395 | 0.5555 |
| 0.1612 | 12.0 | 1200 | 0.6841 | 0.5423 |
| 0.1207 | 16.0 | 1600 | 0.6646 | 0.5224 |
| 0.0929 | 20.0 | 2000 | 0.6355 | 0.4908 |
| 0.0713 | 24.0 | 2400 | 0.6410 | 0.4711 |
| 0.0613 | 28.0 | 2800 | 0.6404 | 0.4662 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu113
- Datasets 1.18.3
- Tokenizers 0.10.3
tursunali/bpt-2
0debf9e5ca418ea15fe81cffebe0ffc1cb29ba5a
2022-05-24T04:26:57.000Z
[ "pytorch", "jax", "gpt2", "text-generation", "de", "transformers" ]
text-generation
false
tursunali
null
tursunali/bpt-2
1
null
transformers
32,133
---
language: de
widget:
- text: "In einer schockierenden Entdeckung fanden Wissenschaftler eine Herde Einhörner, die in einem abgelegenen, zuvor unerforschten Tal in den Anden lebten."
---

# BPT2

See the [GPT2 model card](https://huggingface.co/gpt2) for considerations on limitations and bias. See the [GPT2 documentation](https://huggingface.co/transformers/model_doc/gpt2.html) for details on GPT2.

## Usage

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

tokenizer = AutoTokenizer.from_pretrained("tursunali/bpt2")
model = AutoModelForCausalLM.from_pretrained("tursunali/bpt2")

prompt = "<your prompt>"

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(pipe(prompt)[0]["generated_text"])
```

Also, two tricks might improve the generated text:

```python
import torch  # added: needed for torch.tensor below

max_length = 100  # illustrative value; the original snippet left this undefined

output = model.generate(
    # during training an EOS token was used to mark the beginning of each text
    # so it can help to insert it at the start
    torch.tensor(
        [tokenizer.eos_token_id] + tokenizer.encode(prompt)
    ).unsqueeze(0),
    do_sample=True,
    # try setting bad_words_ids=[[0]] to disallow generating an EOS token, without this the model is
    # prone to ending generation early because a significant number of texts from the training corpus
    # is quite short
    bad_words_ids=[[0]],
    max_length=max_length,
)[0]
print(tokenizer.decode(output))
```

## Citing

Please cite BPT2 as follows:

```
@misc{Backpacker_Trail_German_large_2022,
  author = {BackpackerTrail, Tursunali Kholdorov},
  title = {{BPT2: Backpacker Trail German versions of BPT2}},
  url = {https://github.com/Tursunali-Kholdorov/bptTrainer},
  year = {2022}
}
```
birdringxD/SSAP_ckpt
c067a05426982970fca04f70b6611c1e369069f3
2022-05-23T12:03:55.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
birdringxD
null
birdringxD/SSAP_ckpt
1
null
transformers
32,134
Entry not found
NabilOulbaz/bertweet_retrained_semEval2018
e7febfb53821bec9222c3eac435b1907baa2f4a4
2022-05-23T12:29:01.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
false
NabilOulbaz
null
NabilOulbaz/bertweet_retrained_semEval2018
1
null
transformers
32,135
--- license: mit ---
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_42
513e11bbda191c08131e5253d2d33d3bd0294ee0
2022-05-24T10:02:06.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_42
1
null
transformers
32,136
Entry not found
MeshalAlamr/wav2vec2-xls-r-300m-ar-10
abbce08a08d496384d1140cc24721f0da40fe5f4
2022-05-25T00:40:19.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
MeshalAlamr
null
MeshalAlamr/wav2vec2-xls-r-300m-ar-10
1
null
transformers
32,137
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-xls-r-300m-ar-10
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-xls-r-300m-ar-10

This model is a fine-tuned version of [MeshalAlamr/wav2vec2-xls-r-300m-ar-9](https://huggingface.co/MeshalAlamr/wav2vec2-xls-r-300m-ar-9) on the common_voice dataset. It achieves the following results on the evaluation set:
- Loss: 87.0172
- Wer: 0.2017

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 64
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 256
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 56.409 | 4.71 | 400 | 81.8407 | 0.2151 |
| 84.2726 | 9.41 | 800 | 82.6777 | 0.2237 |
| 80.3604 | 14.12 | 1200 | 85.3856 | 0.2226 |
| 70.7446 | 18.82 | 1600 | 87.9551 | 0.2180 |
| 61.3713 | 23.53 | 2000 | 88.0419 | 0.2096 |
| 54.5011 | 28.24 | 2400 | 87.0172 | 0.2017 |

### Framework versions

- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 1.18.4
- Tokenizers 0.11.6
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_42
ab62c81ca720ae1747a2d88a57c6a7f7722cc256
2022-05-24T10:02:16.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_42
1
null
transformers
32,138
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_42
8bba4a65a06041d528131266192c4da034285874
2022-05-24T10:02:26.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_42
1
null
transformers
32,139
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_42
7527836851576547be0437c2d21b97e21b982370
2022-05-24T10:02:36.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_42
1
null
transformers
32,140
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_42
260f1fc2071e827bd2dfde833eeb63260d482a9f
2022-05-24T10:05:25.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_42
1
null
transformers
32,141
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_42
e1d87333390596e57b8ddd2f03c71d1047330446
2022-05-24T10:05:35.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_42
1
null
transformers
32,142
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_42
391c7f931b419937f4a1d81a63ac2d5304c82f3c
2022-05-24T10:05:45.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_42
1
null
transformers
32,143
Entry not found
paola-md/recipe-steps-en
818c590583c77a1d20ebde3a4042333020ae6e14
2022-05-23T16:35:26.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
paola-md
null
paola-md/recipe-steps-en
1
null
transformers
32,144
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_42
f7da9a1eccef5506470e840a933a19e2cdbb9e18
2022-05-24T10:05:55.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_42
1
null
transformers
32,145
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_42
dc5be89e6a9860e9f089e610ee947c98999a2fee
2022-05-24T10:08:44.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_42
1
null
transformers
32,146
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_42
0268bb5565fe673c7185147f6da0da57296f66cd
2022-05-24T10:09:04.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_42
1
null
transformers
32,147
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_42
107e238c1c5fe619d3bbd66e89b38b86602d7561
2022-05-24T10:09:14.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_42
1
null
transformers
32,148
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_43
fc4e9b56dc12fdb37fe1775a3aeefc595417e69b
2022-05-24T10:02:08.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_43
1
null
transformers
32,149
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_43
7b7d1ac58bd18a1d2986dd964ddd4e6356b871cd
2022-05-24T10:02:18.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_43
1
null
transformers
32,150
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_43
d622f1f22d63d0fa87aa39b56bfa5fd23606c143
2022-05-24T10:02:28.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_43
1
null
transformers
32,151
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_43
f6296bc7f0ef9be4828677fa0245d49f8ce07c0c
2022-05-24T10:02:38.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_43
1
null
transformers
32,152
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_43
b3e9a7a86961169bad674433467870a6a7ead4b3
2022-05-24T10:05:27.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_43
1
null
transformers
32,153
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_43
d049f7c9b8bbcf2f761e177c2ada83e8a5963b6a
2022-05-24T10:05:37.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_43
1
null
transformers
32,154
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_43
e5c6abe205b0cb561c2f8ab1941b1ad8d5471757
2022-05-24T10:05:47.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_43
1
null
transformers
32,155
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_43
7544e909acecd2c7cc12e22957c3a9d6c521c35b
2022-05-24T10:05:57.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_43
1
null
transformers
32,156
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_43
560c0bbb036261033c5e8886ef2746c755ae03eb
2022-05-24T10:08:46.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_43
1
null
transformers
32,157
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.5-class.exclusive.seed_43
5fed7cbf7633657d2cfdc0542baece126a272f73
2022-05-24T10:08:56.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.5-class.exclusive.seed_43
1
null
transformers
32,158
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_43
dfdf0343ce8066fc290e556e0a7237f40d0839af
2022-05-24T10:09:06.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_43
1
null
transformers
32,159
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_43
369079d738d680b0de093acbd105fdf1dfd73638
2022-05-24T10:09:16.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_43
1
null
transformers
32,160
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_44
bd71ef37e481501f896900dead6ad5a8e8f5a6f8
2022-05-24T10:02:10.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_44
1
null
transformers
32,161
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_44
fec62402a2386dd8f7614e4a95ba0848da1cad31
2022-05-24T10:02:20.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_44
1
null
transformers
32,162
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_44
f40a0b5491047a0a6543319168f8aedf3375d934
2022-05-24T10:02:30.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_44
1
null
transformers
32,163
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_44
190996fbd735e0ddb3a022464c5c5ff57a405183
2022-05-24T10:02:40.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_44
1
null
transformers
32,164
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_44
40968b63f1577e37dfda198420c2096b22a4a378
2022-05-24T10:05:29.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_44
1
null
transformers
32,165
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_44
8df2b0c15f7b4882c91b23cc0d95eedf53926117
2022-05-24T10:05:39.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_44
1
null
transformers
32,166
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_44
ccca10e4d97f629ac39e49cc707a85cde28e21a0
2022-05-24T10:05:49.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_44
1
null
transformers
32,167
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_44
00cc9689ca2c8c31115d681f91d3bafcce56e69b
2022-05-24T10:05:59.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_44
1
null
transformers
32,168
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_44
efc85030849515ac9da5697d6d5a093d644d43a7
2022-05-24T10:08:48.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_44
1
null
transformers
32,169
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.5-class.exclusive.seed_44
bc26bf80af4e3af1a327c831a89b89852b20a025
2022-05-24T10:08:58.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.5-class.exclusive.seed_44
1
null
transformers
32,170
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_44
0ebc3d2e8efd6acdaf62d857e3aebe76b1c4ddd5
2022-05-24T10:09:08.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_44
1
null
transformers
32,171
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_44
6f7701535d0b48946979c647bdec6c63d138307c
2022-05-24T10:09:18.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_44
1
null
transformers
32,172
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_45
e9f377f5f031892472758a4642e1381e64827a43
2022-05-24T10:02:12.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_45
1
null
transformers
32,173
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_45
703df9ff5228222501ad5eabb763794d21d73f53
2022-05-24T10:02:22.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_45
1
null
transformers
32,174
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_45
9664c38da6d02878480ff4f5745c14b7588b4c89
2022-05-24T10:02:32.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_45
1
null
transformers
32,175
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_45
c1142523a8e643a5537d7dcf08e247b38e87e707
2022-05-24T10:02:42.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_45
1
null
transformers
32,176
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_45
05c7996437bf001f6349407eace20f36664f467d
2022-05-24T10:05:31.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_45
1
null
transformers
32,177
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_45
18dce6e5076157cdf9e1e632db0d053a13ebbd7c
2022-05-24T10:05:41.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_45
1
null
transformers
32,178
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_45
87d4253fa72113ced6e63292c3a64f2b9dfc0546
2022-05-24T10:05:51.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_45
1
null
transformers
32,179
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_45
2644c8b7eb4b95465c206e51ecc5487f87bcc0a8
2022-05-24T10:06:01.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_45
1
null
transformers
32,180
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_45
7a3d6d5fe3553e5acdcca477fff7c84b9a266d4f
2022-05-24T10:08:50.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_45
1
null
transformers
32,181
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.5-class.exclusive.seed_45
b3d83fc26ea91f5837643b3aef523d8b29016c9a
2022-05-24T10:09:00.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.5-class.exclusive.seed_45
1
null
transformers
32,182
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_45
7b08f8a66dc2e12d717ecd38f4f0d873a403822d
2022-05-24T10:09:10.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_45
1
null
transformers
32,183
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_45
c12f827d790fb6b4509d9251422530acb3183c65
2022-05-24T10:09:20.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_45
1
null
transformers
32,184
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_46
1ccba82d9a467adf6a3cf9e8878eb88fb8219fb3
2022-05-24T10:02:14.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.2-class.exclusive.seed_46
1
null
transformers
32,185
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_46
bde06f221b8e4d0c4d61d19826363b815ce5bc37
2022-05-24T10:02:24.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.2-class.exclusive.seed_46
1
null
transformers
32,186
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_46
e94dad614b1444aa94026f409f460860299a5302
2022-05-24T10:02:34.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.2-class.exclusive.seed_46
1
null
transformers
32,187
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_46
fe302e1f96792ef18d7d20c42e1715172892a576
2022-05-24T10:02:44.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.2-class.exclusive.seed_46
1
null
transformers
32,188
Entry not found
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_46
06a5e7817c0e14d4cb00dc8d46736a611e7344b9
2022-05-24T10:05:33.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.3-class.exclusive.seed_46
1
null
transformers
32,189
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_46
df8bfbc04c6903c933fda0cade177a9e2b565cb3
2022-05-24T10:05:43.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.3-class.exclusive.seed_46
1
null
transformers
32,190
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_46
a9887176c6bc21715f154b2334cddb020235fdf0
2022-05-24T10:05:53.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.3-class.exclusive.seed_46
1
null
transformers
32,191
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_46
f9c9f2857385835dd15863ae7f934c0f6486bc51
2022-05-24T10:06:03.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.3-class.exclusive.seed_46
1
null
transformers
32,192
Entry not found
transformertroy/t5-small-finetuned-tds
62b4a76a051d0fe7a26d7927457d3c6e0f223b14
2022-06-01T17:10:46.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "medium-summarization", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
transformertroy
null
transformertroy/t5-small-finetuned-tds
1
null
transformers
32,193
---
license: apache-2.0
tags:
- medium-summarization
- generated_from_trainer
model-index:
- name: t5-small-finetuned-tds
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# t5-small-finetuned-tds

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5.6e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8

### Framework versions

- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
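A minimal summarization sketch for a T5 fine-tune like the one above; the input text is illustrative, and stock T5 configs let the pipeline apply the conventional "summarize: " prefix automatically:

```python
from transformers import pipeline

# Seq2seq summarization with the fine-tuned checkpoint named above.
summarizer = pipeline("summarization", model="transformertroy/t5-small-finetuned-tds")

# Illustrative input; per its tags, the card targets Medium-article summarization.
article = ("Transformer models have become the default choice for text summarization, "
           "but small checkpoints such as t5-small trade quality for speed.")
print(summarizer(article, max_length=48, min_length=8)[0]["summary_text"])
```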
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_46
6a54b4c84b81392d68e5efad8deb294686637696
2022-05-24T10:08:52.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_46
1
null
transformers
32,194
Entry not found
CEBaB/lstm.CEBaB.causalm.food__service.5-class.exclusive.seed_46
90a3f68e07ace9a08b597d6703efa2f833717a10
2022-05-24T10:09:02.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.food__service.5-class.exclusive.seed_46
1
null
transformers
32,195
Entry not found
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_46
ecace0c0ccdd765f183f3c16de1560523f1ce2b4
2022-05-24T10:09:12.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.noise__food.5-class.exclusive.seed_46
1
null
transformers
32,196
Entry not found
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_46
8c339ddbee561de0ef2c812c3bb69ec341f758d5
2022-05-24T10:09:22.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.service__food.5-class.exclusive.seed_46
1
null
transformers
32,197
Entry not found
CEBaB/lstm.CEBaB.causalm.None__None.2-class.exclusive.seed_42
8da581f7cd927a7488a9db7b04fe5fcd1b41431d
2022-05-24T10:01:56.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.None__None.2-class.exclusive.seed_42
1
null
transformers
32,198
Entry not found
CEBaB/lstm.CEBaB.causalm.None__None.3-class.exclusive.seed_42
01e073c324fd67657ca54b0c336e954538ae2753
2022-05-24T10:05:15.000Z
[ "pytorch", "lstm_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/lstm.CEBaB.causalm.None__None.3-class.exclusive.seed_42
1
null
transformers
32,199
Entry not found