modelId: string (length 4-112)
sha: string (length 40)
lastModified: string (length 24)
tags: sequence
pipeline_tag: string (29 classes)
private: bool (1 class)
author: string (length 2-38)
config: null
id: string (length 4-112)
downloads: float64 (0-36.8M)
likes: float64 (0-712)
library_name: string (17 classes)
__index_level_0__: int64 (0-38.5k)
readme: string (length 0-186k)
CEBaB/gpt2.CEBaB.causalm.food__service.2-class.exclusive.seed_46
1168bf599b19d00eaa8e3c9c3713470a62192712
2022-05-24T10:04:53.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.food__service.2-class.exclusive.seed_46
2
null
transformers
26,100
Entry not found
CEBaB/gpt2.CEBaB.causalm.noise__food.2-class.exclusive.seed_42
1caed6e87b767b532fb90e305377fb98959427f4
2022-05-24T10:04:55.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.noise__food.2-class.exclusive.seed_42
2
null
transformers
26,101
Entry not found
CEBaB/gpt2.CEBaB.causalm.noise__food.2-class.exclusive.seed_43
99d4815b621dd252875116bb77310fbeb9ce7741
2022-05-24T10:04:57.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.noise__food.2-class.exclusive.seed_43
2
null
transformers
26,102
Entry not found
CEBaB/gpt2.CEBaB.causalm.noise__food.2-class.exclusive.seed_46
8619a5f34e20a3acd198f5125e0276adb9501d78
2022-05-24T10:05:03.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.noise__food.2-class.exclusive.seed_46
2
null
transformers
26,103
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.2-class.exclusive.seed_42
a9117264cc2fd479399d80e9934784e04e9d922d
2022-05-24T10:05:05.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.2-class.exclusive.seed_42
2
null
transformers
26,104
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.2-class.exclusive.seed_45
f20af62802888bdb5e9c84d62ec8e8c063e9429f
2022-05-24T10:05:11.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.2-class.exclusive.seed_45
2
null
transformers
26,105
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.3-class.exclusive.seed_42
c705274c20f260ee00d0d6241d58fa1196986a16
2022-05-24T10:08:24.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.3-class.exclusive.seed_42
2
null
transformers
26,106
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.3-class.exclusive.seed_43
1fb1fd17929f2bc85a7bca3d99dbec86c57c3bf3
2022-05-24T10:08:26.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.3-class.exclusive.seed_43
2
null
transformers
26,107
Entry not found
CEBaB/bert-base-uncased.CEBaB.causalm.noise__food.5-class.exclusive.seed_46
dc11f75131de7c2a4f69f4981e95cb244c2ccee7
2022-05-24T10:10:02.000Z
[ "pytorch", "bert_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/bert-base-uncased.CEBaB.causalm.noise__food.5-class.exclusive.seed_46
2
null
transformers
26,108
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_42
b5e7f51550ca3102ab17999c581c661184e6299c
2022-05-24T10:11:45.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_42
2
null
transformers
26,109
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_43
9b3d7ee19a8d991fb453c81852f4844d4e7209bb
2022-05-24T10:11:47.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_43
2
null
transformers
26,110
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_44
fd188d31a4d3a1ea04666c59d0734ef93518ef0d
2022-05-24T10:11:49.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_44
2
null
transformers
26,111
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_45
65040dacf00db2a5fe1b59359e1cd3ff2449c776
2022-05-24T10:11:51.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_45
2
null
transformers
26,112
Entry not found
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_46
fdde94f1ca1cecb5811ed17b9726cc26402155b0
2022-05-24T10:11:53.000Z
[ "pytorch", "gpt2_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/gpt2.CEBaB.causalm.service__food.5-class.exclusive.seed_46
2
null
transformers
26,113
Entry not found
stephenleejm/T5_yoda_translator
75c5901f0ca30e934d941d666edeb3b67d51f037
2022-06-06T07:01:44.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
stephenleejm
null
stephenleejm/T5_yoda_translator
2
null
transformers
26,114
# Introduction This model translates between Yoda-ish and English and vice versa. It makes use of the [T5-base](https://huggingface.co/t5-base) model and fine-tuning. It is trained on two tasks, Yoda-ish to English and English to Yoda-ish, using the same dataset. # Dataset For this first version of the model I used a small sample of 20 Yoda quotes for training. I am in the midst of collecting more samples for training. # Usage **Input** For Yoda-ish to English, you can use the prefix "y_to_e: text" as the input. For English to Yoda-ish, you can use the prefix "e_to_y: text". **Output** The translated sentence. E.g. e_to_y: I am sick of you => Sick of you, I am # Spaces To try this model, you can access it [here](https://huggingface.co/spaces/stephenleejm/yoda_translator)
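A minimal usage sketch for the prefix scheme described above (not part of the original card); the generation settings are illustrative assumptions.

```python
# Hedged example: translating English to Yoda-ish with the "e_to_y: " prefix.
# max_length is an illustrative assumption, not a value taken from the model card.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("stephenleejm/T5_yoda_translator")
model = AutoModelForSeq2SeqLM.from_pretrained("stephenleejm/T5_yoda_translator")

inputs = tokenizer("e_to_y: I am sick of you", return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # per the card's example: "Sick of you, I am"
```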
hamidov02/wav2vec2-large-xls-r-53h-turkish-colab
3c74121ff05f55dd9867dd58ca9d8294778addb3
2022-05-24T08:50:22.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
hamidov02
null
hamidov02/wav2vec2-large-xls-r-53h-turkish-colab
2
null
transformers
26,115
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-53h-turkish-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-53h-turkish-colab This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.4135 - Wer: 0.3247 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 32 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 9.4875 | 0.92 | 100 | 3.5328 | 1.0 | | 3.1866 | 1.83 | 200 | 3.0955 | 1.0 | | 2.027 | 2.75 | 300 | 0.9002 | 0.7685 | | 0.7285 | 3.67 | 400 | 0.6279 | 0.6693 | | 0.4693 | 4.59 | 500 | 0.5672 | 0.5643 | | 0.3615 | 5.5 | 600 | 0.4995 | 0.5094 | | 0.2846 | 6.42 | 700 | 0.4561 | 0.4797 | | 0.2253 | 7.34 | 800 | 0.4742 | 0.4675 | | 0.2004 | 8.26 | 900 | 0.4462 | 0.4345 | | 0.173 | 9.17 | 1000 | 0.4688 | 0.4333 | | 0.1547 | 10.09 | 1100 | 0.4429 | 0.4206 | | 0.1444 | 11.01 | 1200 | 0.4662 | 0.4144 | | 0.1274 | 11.93 | 1300 | 0.4675 | 0.4213 | | 0.1164 | 12.84 | 1400 | 0.4947 | 0.4073 | | 0.1081 | 13.76 | 1500 | 0.4223 | 0.3915 | | 0.1025 | 14.68 | 1600 | 0.4493 | 0.3912 | | 0.0944 | 15.6 | 1700 | 0.4527 | 0.3848 | | 0.0943 | 16.51 | 1800 | 0.4288 | 0.3810 | | 0.0885 | 17.43 | 1900 | 0.4313 | 0.3670 | | 0.0781 | 18.35 | 2000 | 0.4729 | 0.3790 | | 0.0828 | 19.27 | 2100 | 0.4560 | 0.3651 | | 0.0753 | 20.18 | 2200 | 0.4478 | 0.3599 | | 0.0702 | 21.1 | 2300 | 0.4518 | 0.3595 | | 0.0666 | 22.02 | 2400 | 0.4080 | 0.3489 | | 0.0661 | 22.94 | 2500 | 0.4414 | 0.3507 | | 0.0607 | 23.85 | 2600 | 0.4209 | 0.3538 | | 0.058 | 24.77 | 2700 | 0.4302 | 0.3382 | | 0.0596 | 25.69 | 2800 | 0.3939 | 0.3328 | | 0.052 | 26.61 | 2900 | 0.4374 | 0.3311 | | 0.0473 | 27.52 | 3000 | 0.4406 | 0.3363 | | 0.0483 | 28.44 | 3100 | 0.4272 | 0.3286 | | 0.049 | 29.36 | 3200 | 0.4189 | 0.3257 | | 0.0433 | 30.28 | 3300 | 0.4242 | 0.3229 | | 0.0438 | 31.19 | 3400 | 0.4135 | 0.3247 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
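The card above lists training details but no inference snippet; the following is a minimal sketch of transcribing a local recording with the transformers ASR pipeline. The file path is a placeholder and is assumed to point at a 16 kHz mono WAV file.

```python
# Hedged inference sketch; "sample_turkish.wav" is a placeholder for a 16 kHz mono recording.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="hamidov02/wav2vec2-large-xls-r-53h-turkish-colab",
)
print(asr("sample_turkish.wav")["text"])
```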
himanshubeniwal/bert_sst_ft
b3116ad41cdf552c4865dc89576f528f3409b6dc
2022-05-24T05:58:34.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
himanshubeniwal
null
himanshubeniwal/bert_sst_ft
2
null
transformers
26,116
Entry not found
KoichiYasuoka/deberta-base-japanese-upos
57e025f218c419ae19f8d91ee956b12776fea31a
2022-05-24T08:16:37.000Z
[ "pytorch", "deberta-v2", "token-classification", "ja", "dataset:universal_dependencies", "transformers", "japanese", "pos", "dependency-parsing", "license:cc-by-sa-4.0", "autotrain_compatible" ]
token-classification
false
KoichiYasuoka
null
KoichiYasuoka/deberta-base-japanese-upos
2
null
transformers
26,117
--- language: - "ja" tags: - "japanese" - "token-classification" - "pos" - "dependency-parsing" datasets: - "universal_dependencies" license: "cc-by-sa-4.0" pipeline_tag: "token-classification" widget: - text: "国境の長いトンネルを抜けると雪国であった。" --- # deberta-base-japanese-upos ## Model Description This is a DeBERTa(V2) model pre-trained on 青空文庫 texts for POS-tagging and dependency-parsing, derived from [deberta-base-japanese-aozora](https://huggingface.co/KoichiYasuoka/deberta-base-japanese-aozora). Every short-unit-word is tagged by [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech). ## How to Use ```py import torch from transformers import AutoTokenizer,AutoModelForTokenClassification tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/deberta-base-japanese-upos") model=AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/deberta-base-japanese-upos") s="国境の長いトンネルを抜けると雪国であった。" t=tokenizer.tokenize(s) p=[model.config.id2label[q] for q in torch.argmax(model(tokenizer.encode(s,return_tensors="pt"))["logits"],dim=2)[0].tolist()[1:-1]] print(list(zip(t,p))) ``` or ```py import esupar nlp=esupar.load("KoichiYasuoka/deberta-base-japanese-upos") print(nlp("国境の長いトンネルを抜けると雪国であった。")) ``` ## See Also [esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer POS-tagger and Dependency-parser with BERT/RoBERTa/DeBERTa models
PontifexMaximus/opus-mt-de-en-finetuned-de-to-en
80af21dcc659d20bdaaa7545c38e918befd2bc2b
2022-05-24T11:38:56.000Z
[ "pytorch", "tensorboard", "marian", "text2text-generation", "dataset:wmt14", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
PontifexMaximus
null
PontifexMaximus/opus-mt-de-en-finetuned-de-to-en
2
null
transformers
26,118
--- license: apache-2.0 tags: - generated_from_trainer datasets: - wmt14 model-index: - name: opus-mt-de-en-finetuned-de-to-en results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-de-en-finetuned-de-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-de-en](https://huggingface.co/Helsinki-NLP/opus-mt-de-en) on the wmt14 dataset. It achieves the following results on the evaluation set: - eval_loss: 1.3411 - eval_bleu: 32.4395 - eval_gen_len: 29.6925 - eval_runtime: 2250.0489 - eval_samples_per_second: 19.998 - eval_steps_per_second: 0.625 - epoch: 3.0 - step: 4221 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-06 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 11 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.19.2 - Pytorch 1.7.1+cu110 - Datasets 2.2.2 - Tokenizers 0.12.1
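The card above reports evaluation metrics but gives no usage snippet; here is a minimal German-to-English sketch, assuming the fine-tuned repository ships the original Marian tokenizer files. The input sentence is illustrative.

```python
# Hedged inference sketch for the fine-tuned opus-mt de-en checkpoint.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "PontifexMaximus/opus-mt-de-en-finetuned-de-to-en"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

batch = tokenizer(["Maschinelles Lernen ist faszinierend."], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```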
AswiN037/sentence-t-roberta-large-wechsel-tamil
7dde5bdc7f43827907a98fe4c5aea4c85b4c5074
2022-05-25T08:55:45.000Z
[ "pytorch", "roberta", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
AswiN037
null
AswiN037/sentence-t-roberta-large-wechsel-tamil
2
1
sentence-transformers
26,119
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # sent-Roberta-wechsel-tamil This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 1024 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
masoumehb/wav2vec2-large-xlsr-persian-v3
918f655ca45ef4b729b496288139114a3fdf2b1a
2022-05-24T13:55:20.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
masoumehb
null
masoumehb/wav2vec2-large-xlsr-persian-v3
2
0
transformers
26,120
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xlsr-persian-v3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xlsr-persian-v3 This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0 - Datasets 1.13.3 - Tokenizers 0.10.3
kimcando/test3
e5974ce3ec6611a701e5fc4a56467b1639771cd6
2022-05-24T13:12:54.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
kimcando
null
kimcando/test3
2
null
transformers
26,121
Entry not found
hamidov02/wav2vec2-large-xls-hun-53h-colab
66afc1b2a9cc64f03b719eff47783cd787c23f4e
2022-05-24T19:38:50.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
hamidov02
null
hamidov02/wav2vec2-large-xls-hun-53h-colab
2
null
transformers
26,122
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-hun-53h-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-hun-53h-colab This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.6027 - Wer: 0.4618 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 23 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 13.4225 | 0.67 | 100 | 3.7750 | 1.0 | | 3.4121 | 1.34 | 200 | 3.3166 | 1.0 | | 3.2263 | 2.01 | 300 | 3.1403 | 1.0 | | 3.0038 | 2.68 | 400 | 2.2474 | 0.9990 | | 1.2243 | 3.35 | 500 | 0.8174 | 0.7666 | | 0.6368 | 4.03 | 600 | 0.6306 | 0.6633 | | 0.4426 | 4.7 | 700 | 0.6151 | 0.6648 | | 0.3821 | 5.37 | 800 | 0.5765 | 0.6138 | | 0.3337 | 6.04 | 900 | 0.5522 | 0.5785 | | 0.2832 | 6.71 | 1000 | 0.5822 | 0.5691 | | 0.2485 | 7.38 | 1100 | 0.5626 | 0.5449 | | 0.2335 | 8.05 | 1200 | 0.5866 | 0.5662 | | 0.2031 | 8.72 | 1300 | 0.5574 | 0.5420 | | 0.1925 | 9.39 | 1400 | 0.5572 | 0.5297 | | 0.1793 | 10.07 | 1500 | 0.5878 | 0.5185 | | 0.1652 | 10.74 | 1600 | 0.6173 | 0.5243 | | 0.1663 | 11.41 | 1700 | 0.5807 | 0.5133 | | 0.1544 | 12.08 | 1800 | 0.5979 | 0.5154 | | 0.148 | 12.75 | 1900 | 0.5545 | 0.4986 | | 0.138 | 13.42 | 2000 | 0.5798 | 0.4947 | | 0.1353 | 14.09 | 2100 | 0.5670 | 0.5028 | | 0.1283 | 14.76 | 2200 | 0.5862 | 0.4957 | | 0.1271 | 15.43 | 2300 | 0.6009 | 0.4961 | | 0.1108 | 16.11 | 2400 | 0.5873 | 0.4975 | | 0.1182 | 16.78 | 2500 | 0.6013 | 0.4893 | | 0.103 | 17.45 | 2600 | 0.6165 | 0.4898 | | 0.1084 | 18.12 | 2700 | 0.6186 | 0.4838 | | 0.1014 | 18.79 | 2800 | 0.6122 | 0.4767 | | 0.1009 | 19.46 | 2900 | 0.5981 | 0.4793 | | 0.1004 | 20.13 | 3000 | 0.6034 | 0.4770 | | 0.0922 | 20.8 | 3100 | 0.6127 | 0.4663 | | 0.09 | 21.47 | 3200 | 0.5967 | 0.4672 | | 0.0893 | 22.15 | 3300 | 0.6051 | 0.4611 | | 0.0817 | 22.82 | 3400 | 0.6027 | 0.4618 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
laituan245/rag-token-bart-base
bb99d38de82f700b6ee325010e2ac8980c998f29
2022-05-24T17:37:03.000Z
[ "pytorch", "rag", "transformers" ]
null
false
laituan245
null
laituan245/rag-token-bart-base
2
null
transformers
26,123
This model is a non-finetuned RAG-Token model and was created as follows: ```python from transformers import RagTokenizer, RagTokenForGeneration, AutoTokenizer model = RagTokenForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-base" ) question_encoder_tokenizer = AutoTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator_tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base") tokenizer = RagTokenizer(question_encoder_tokenizer, generator_tokenizer) model.config.use_dummy_dataset = True model.config.index_name = "exact" model.save_pretrained("./") tokenizer.save_pretrained("./") ```
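The card shows how the checkpoint was assembled but not how to run it; the following is a hedged generation sketch that mirrors the dummy-dataset, exact-index configuration saved above. The question is illustrative, and retrieval against the dummy index is not expected to give meaningful answers.

```python
# Hedged usage sketch; index_name/use_dummy_dataset mirror the config saved in the creation script.
from transformers import RagRetriever, RagTokenForGeneration, RagTokenizer

tokenizer = RagTokenizer.from_pretrained("laituan245/rag-token-bart-base")
retriever = RagRetriever.from_pretrained(
    "laituan245/rag-token-bart-base", index_name="exact", use_dummy_dataset=True
)
model = RagTokenForGeneration.from_pretrained("laituan245/rag-token-bart-base", retriever=retriever)

inputs = tokenizer("who wrote the origin of species", return_tensors="pt")
generated = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```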
ronanki/ml_use_13
8318fbdd41a7436abf23ae1aa5fcd5cb5ca39eb6
2022-05-24T17:43:08.000Z
[ "pytorch", "distilbert", "feature-extraction", "sentence-transformers", "sentence-similarity" ]
sentence-similarity
false
ronanki
null
ronanki/ml_use_13
2
null
sentence-transformers
26,124
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # ronanki/ml_use_13 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 512 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('ronanki/ml_use_13') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=ronanki/ml_use_13) ## Training The model was trained with the parameters: **DataLoader**: `sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 8 with parameters: ``` {'batch_size': 4} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 3, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 0, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 768, 'out_features': 512, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
laituan245/rag-sequence-bart-base
1fba766a2b8b6e7bd58475afcd7b5f12c19bd205
2022-05-24T17:49:50.000Z
[ "pytorch", "rag", "transformers" ]
null
false
laituan245
null
laituan245/rag-sequence-bart-base
2
null
transformers
26,125
This model is a non-finetuned RAG-Sequence model and was created as follows: ```python from transformers import RagTokenizer, RagSequenceForGeneration, AutoTokenizer model = RagSequenceForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-base" ) question_encoder_tokenizer = AutoTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator_tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base") tokenizer = RagTokenizer(question_encoder_tokenizer, generator_tokenizer) model.config.use_dummy_dataset = True model.config.index_name = "exact" model.save_pretrained("./") tokenizer.save_pretrained("./") ```
castorini/afriteva_small
6760505fb3977fb31c4e58050e6bb207d085fb48
2022-05-24T20:16:49.000Z
[ "pytorch", "t5", "text2text-generation", "om", "am", "rw", "rn", "ha", "ig", "pcm", "so", "sw", "ti", "yo", "multilingual", "T5", "transformers", "autotrain_compatible" ]
text2text-generation
false
castorini
null
castorini/afriteva_small
2
null
transformers
26,126
--- language: - om - am - rw - rn - ha - ig - pcm - so - sw - ti - yo - multilingual - T5 --- # afriteva_small ## Model description AfriTeVa small is a sequence-to-sequence model pretrained on 10 African languages ## Languages Afaan Oromoo (orm), Amharic (amh), Gahuza (gah), Hausa (hau), Igbo (igb), Nigerian Pidgin (pcm), Somali (som), Swahili (swa), Tigrinya (tig), Yoruba (yor) ### More information on the model, dataset: ### The model - 64M parameters encoder-decoder architecture (T5-like) - 6 layers, 8 attention heads and 512 token sequence length ### The dataset - Multilingual: 10 African languages listed above - 143 Million Tokens (1GB of text data) - Tokenizer Vocabulary Size: 70,000 tokens ## Intended uses & limitations `afriteva_small` is a pre-trained model and is primarily aimed at being fine-tuned on multilingual sequence-to-sequence tasks. ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("castorini/afriteva_small") >>> model = AutoModelForSeq2SeqLM.from_pretrained("castorini/afriteva_small") >>> src_text = "Ó hùn ọ́ láti di ara wa bí?" >>> tgt_text = "Would you like to be?" >>> model_inputs = tokenizer(src_text, return_tensors="pt") >>> with tokenizer.as_target_tokenizer(): labels = tokenizer(tgt_text, return_tensors="pt").input_ids >>> model(**model_inputs, labels=labels) # forward pass ``` ## Training Procedure For information on training procedures, please refer to the AfriTeVa [paper](#) or [repository](https://github.com/castorini/afriteva) ## BibTex entry and Citation info coming soon ...
emilylearning/cond_ft_none_on_reddit__prcnt_20__test_run_False__xlm-roberta-base
3f38d904b6697273597152a7ab7cd6fc1a7356c6
2022-05-25T07:53:16.000Z
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
emilylearning
null
emilylearning/cond_ft_none_on_reddit__prcnt_20__test_run_False__xlm-roberta-base
2
null
transformers
26,127
Entry not found
logo-data-science/distilbert-finetuned
52df852bf7005730e1318365d661a0ad7cb3ffd3
2022-05-25T05:55:44.000Z
[ "pytorch", "distilbert", "question-answering", "transformers", "license:gpl", "autotrain_compatible" ]
question-answering
false
logo-data-science
null
logo-data-science/distilbert-finetuned
2
null
transformers
26,128
--- license: gpl ---
emilylearning/cond_ft_subreddit_on_reddit__prcnt_20__test_run_False__xlm-roberta-base
39d9e4e50aa97a534d2767fd1b5d3d7684403b63
2022-05-25T11:09:44.000Z
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
emilylearning
null
emilylearning/cond_ft_subreddit_on_reddit__prcnt_20__test_run_False__xlm-roberta-base
2
null
transformers
26,129
Entry not found
OHenry/OHenry
0100bcf7c643affd201c0a8abdb585762d9b5103
2022-05-25T09:16:44.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
OHenry
null
OHenry/OHenry
2
null
transformers
26,130
Entry not found
jimypbr/bart-large-test
9693a315d1b8b0d8cee8220e883647c8e8e8aa5e
2022-05-25T12:02:26.000Z
[ "pytorch", "bart", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
jimypbr
null
jimypbr/bart-large-test
2
null
transformers
26,131
--- license: apache-2.0 tags: - generated_from_trainer datasets: - cnn_dailymail model-index: - name: outputs results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # outputs This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on the cnn_dailymail 3.0.0 dataset. ## Model description More information needed ## Intended uses & limitations This is a work in progress. Please don't use these weights. ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 1 - eval_batch_size: 2 - seed: 42 - distributed_type: IPU - gradient_accumulation_steps: 256 - total_train_batch_size: 2048 - total_eval_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 2.0 - training precision: Mixed Precision ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.0+cpu - Datasets 2.2.1 - Tokenizers 0.12.1
thundaa/tape-fluorescence-prediction-RITA_s
ca4464f7e270be50782aefeaa3b11eed7fb29d50
2022-05-26T15:37:58.000Z
[ "pytorch", "rita", "text-classification", "dataset:train", "transformers", "protein language model", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
thundaa
null
thundaa/tape-fluorescence-prediction-RITA_s
2
null
transformers
26,132
--- license: apache-2.0 tags: - protein language model - generated_from_trainer datasets: - train metrics: - spearmanr model-index: - name: tape-fluorescence-prediction-RITA_s results: - task: name: Text Classification type: text-classification dataset: name: cradle-bio/tape-fluorescence type: train metrics: - name: Spearmanr type: spearmanr value: 0.2955275250425323 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tape-fluorescence-prediction-RITA_s This model is a fine-tuned version of [lightonai/RITA_s](https://huggingface.co/lightonai/RITA_s) on the cradle-bio/tape-fluorescence dataset. It achieves the following results on the evaluation set: - Loss: 0.5855 - Spearmanr: 0.2955 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 128 - total_train_batch_size: 4096 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Spearmanr | |:-------------:|:-----:|:----:|:---------------:|:---------:| | 4.3595 | 0.85 | 4 | 0.7057 | 0.0940 | | 0.8654 | 1.85 | 8 | 0.6873 | 0.1280 | | 0.8292 | 2.85 | 12 | 0.6835 | 0.2290 | | 0.8212 | 3.85 | 16 | 0.6837 | 0.3110 | | 0.8191 | 4.85 | 20 | 0.6799 | 0.3281 | | 0.8137 | 5.85 | 24 | 0.6748 | 0.3277 | | 0.8057 | 6.85 | 28 | 0.6592 | 0.3162 | | 0.7769 | 7.85 | 32 | 0.6283 | 0.3065 | | 0.7382 | 8.85 | 36 | 0.6103 | 0.2795 | | 0.5991 | 9.85 | 40 | 0.5855 | 0.2955 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
shoubhik/electra_finetune
cbeeea14dcb68108a144096e5a44777d9c998ad4
2022-05-25T13:12:48.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
shoubhik
null
shoubhik/electra_finetune
2
null
transformers
26,133
Entry not found
StephennFernandes/xls-r-300m-common_voice-ta
f5f84c8507998fa749822a4a325a6f03d2b5b039
2022-05-26T10:35:42.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
StephennFernandes
null
StephennFernandes/xls-r-300m-common_voice-ta
2
null
transformers
26,134
Entry not found
xuio/roberta-sts
2dae31cbe2d859b42011a6da878bc1e52fcb7b4e
2022-05-26T01:38:59.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
xuio
null
xuio/roberta-sts
2
null
transformers
26,135
Entry not found
emilylearning/cond_ft_none_on_reddit__prcnt_na__test_run_True__xlm-roberta-base
74284501ffc87ae8d68f2290c4615b6b643423a3
2022-05-25T23:02:02.000Z
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
emilylearning
null
emilylearning/cond_ft_none_on_reddit__prcnt_na__test_run_True__xlm-roberta-base
2
null
transformers
26,136
Entry not found
austin/t5_austin_large
a384ebc67b0ef86bd4482e34b2d185e05b702313
2022-06-02T20:29:54.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
austin
null
austin/t5_austin_large
2
null
transformers
26,137
Entry not found
shoubhik/electra_freezed_9th_layer
19da4484d87f750e1a4fe5bab359211eab54fb78
2022-05-26T11:58:03.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
shoubhik
null
shoubhik/electra_freezed_9th_layer
2
null
transformers
26,138
Entry not found
aioxlabs/dvoice-wolof
27e94ebd43ba590ef612f121c740661577adbea9
2022-05-28T08:22:16.000Z
[ "wav2vec2", "feature-extraction", "wo", "dataset:commonvoice", "speechbrain", "CTC", "pytorch", "Transformer", "license:apache-2.0", "automatic-speech-recognition" ]
automatic-speech-recognition
false
aioxlabs
null
aioxlabs/dvoice-wolof
2
null
speechbrain
26,139
--- language: "wo" thumbnail: pipeline_tag: automatic-speech-recognition tags: - CTC - pytorch - speechbrain - Transformer license: "apache-2.0" datasets: - commonvoice metrics: - wer - cer --- <iframe src="https://ghbtns.com/github-btn.html?user=speechbrain&repo=speechbrain&type=star&count=true&size=large&v=2" frameborder="0" scrolling="0" width="170" height="30" title="GitHub"></iframe> <br/><br/> # wav2vec 2.0 with CTC/Attention trained on DVoice Wolof (No LM) This repository provides all the necessary tools to perform automatic speech recognition from an end-to-end system pretrained on a [ALFFA](https://github.com/besacier/ALFFA_PUBLIC) Wolof dataset within SpeechBrain. For a better experience, we encourage you to learn more about [SpeechBrain](https://speechbrain.github.io). | DVoice Release | Val. CER | Val. WER | Test CER | Test WER | |:-------------:|:---------------------------:| -----:| -----:| -----:| | v2.0 | 4.81 | 16.25 | 4.83 | 16.05 | # Pipeline description This ASR system is composed of 2 different but linked blocks: - Tokenizer (unigram) that transforms words into subword units and trained with the train transcriptions. - Acoustic model (wav2vec2.0 + CTC). A pretrained wav2vec 2.0 model ([facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53)) is combined with two DNN layers and finetuned on the Darija dataset. The obtained final acoustic representation is given to the CTC greedy decoder. The system is trained with recordings sampled at 16kHz (single channel). The code will automatically normalize your audio (i.e., resampling + mono channel selection) when calling *transcribe_file* if needed. # Install SpeechBrain First of all, please install tranformers and SpeechBrain with the following command: ``` pip install speechbrain transformers ``` Please notice that we encourage you to read the SpeechBrain tutorials and learn more about [SpeechBrain](https://speechbrain.github.io). # Transcribing your own audio files (in Wolof) ```python from speechbrain.pretrained import EncoderASR asr_model = EncoderASR.from_hparams(source="aioxlabs/dvoice-wolof", savedir="pretrained_models/asr-wav2vec2-dvoice-wol") asr_model.transcribe_file('./the_path_to_your_audio_file') ``` # Inference on GPU To perform inference on the GPU, add `run_opts={"device":"cuda"}` when calling the `from_hparams` method. # Training To train the model from scratch, please see our GitHub tutorial [here](https://github.com/AIOXLABS/DVoice). # Limitations The SpeechBrain team does not provide any warranty on the performance achieved by this model when used on other datasets. # Referencing SpeechBrain ``` @misc{SB2021, author = {Ravanelli, Mirco and Parcollet, Titouan and Rouhe, Aku and Plantinga, Peter and Rastorgueva, Elena and Lugosch, Loren and Dawalatabad, Nauman and Ju-Chieh, Chou and Heba, Abdel and Grondin, Francois and Aris, William and Liao, Chien-Feng and Cornell, Samuele and Yeh, Sung-Lin and Na, Hwidong and Gao, Yan and Fu, Szu-Wei and Subakan, Cem and De Mori, Renato and Bengio, Yoshua }, title = {SpeechBrain}, year = {2021}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\\\\url{https://github.com/speechbrain/speechbrain}}, } ``` # About DVoice DVoice is a community initiative that aims to provide Africa low resources languages with data and models to facilitate their use of voice technologies. The lack of data on these languages makes it necessary to collect data using methods that are specific to each one. 
Two different approaches are currently used: the DVoice platforms ([https://dvoice.ma](https://dvoice.ma) and [https://dvoice.sn](https://dvoice.sn)), which are based on Mozilla Common Voice, for collecting authentic recordings from the community, and transfer learning techniques for automatically labeling recordings that are retrieved from social media. The DVoice platform currently manages 7 languages including Darija (Moroccan Arabic dialect), whose dataset appears in this version, Wolof, Mandingo, Serere, Pular, Diola and Soninke. For this project, AIOX Labs and the SI2M Laboratory are joining forces to build the future of technologies together. # About AIOX Labs Based in Rabat, London and Paris, AIOX-Labs mobilizes artificial intelligence technologies to meet the business needs and data projects of companies. - It is at the service of the growth of groups, the optimization of processes, and the improvement of the customer experience. - AIOX-Labs is multi-sector, from fintech to industry, including retail and consumer goods. - Business-ready data products with a solid algorithmic base and adaptability for the specific needs of each client. - A complementary team made up of doctors in AI and business experts with a solid scientific base and international publications. Website: [https://www.aiox-labs.com/](https://www.aiox-labs.com/) # SI2M Laboratory The Information Systems, Intelligent Systems and Mathematical Modeling Research Laboratory (SI2M) is an academic research laboratory of the National Institute of Statistics and Applied Economics (INSEA). The research areas of the laboratory are Information Systems, Intelligent Systems, Artificial Intelligence, Decision Support, Network and System Security, and Mathematical Modelling. Website: [SI2M Laboratory](https://insea.ac.ma/index.php/pole-recherche/equipe-de-recherche/150-laboratoire-de-recherche-en-systemes-d-information-systemes-intelligents-et-modelisation-mathematique) # About SpeechBrain SpeechBrain is an open-source and all-in-one speech toolkit. It is designed to be simple, extremely flexible, and user-friendly. Competitive or state-of-the-art performance is obtained in various domains. Website: https://speechbrain.github.io/ GitHub: https://github.com/speechbrain/speechbrain # Referencing SpeechBrain ``` @misc{SB2021, author = {Ravanelli, Mirco and Parcollet, Titouan and Rouhe, Aku and Plantinga, Peter and Rastorgueva, Elena and Lugosch, Loren and Dawalatabad, Nauman and Ju-Chieh, Chou and Heba, Abdel and Grondin, Francois and Aris, William and Liao, Chien-Feng and Cornell, Samuele and Yeh, Sung-Lin and Na, Hwidong and Gao, Yan and Fu, Szu-Wei and Subakan, Cem and De Mori, Renato and Bengio, Yoshua }, title = {SpeechBrain}, year = {2021}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\\\\url{https://github.com/speechbrain/speechbrain}}, } ``` # Acknowledgements This research was supported through computational resources of HPC-MARWAN (www.marwan.ma/hpc) provided by CNRST, Rabat, Morocco. We deeply thank this institution.
Gergoe/t5-small-booksum-finetuned-booksum-test
974cb695fb8dd27402a671a1ddd0c9e3a7e56505
2022-05-26T21:41:22.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "summarization", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
summarization
false
Gergoe
null
Gergoe/t5-small-booksum-finetuned-booksum-test
2
null
transformers
26,140
--- license: mit tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: t5-small-booksum-finetuned-booksum-test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-booksum-finetuned-booksum-test This model is a fine-tuned version of [cnicu/t5-small-booksum](https://huggingface.co/cnicu/t5-small-booksum) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.2739 - Rouge1: 22.7829 - Rouge2: 4.8349 - Rougel: 18.2465 - Rougelsum: 19.2417 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 3.5123 | 1.0 | 8750 | 3.2816 | 21.7712 | 4.3046 | 17.4053 | 18.4707 | | 3.2347 | 2.0 | 17500 | 3.2915 | 22.2938 | 4.7828 | 17.8567 | 18.9135 | | 3.0892 | 3.0 | 26250 | 3.2568 | 22.4966 | 4.825 | 18.0344 | 19.1306 | | 2.9837 | 4.0 | 35000 | 3.2952 | 22.6913 | 5.0322 | 18.176 | 19.2751 | | 2.9028 | 5.0 | 43750 | 3.2626 | 22.3548 | 4.7521 | 17.8681 | 18.7815 | | 2.8441 | 6.0 | 52500 | 3.2691 | 22.6279 | 4.932 | 18.1051 | 19.0763 | | 2.8006 | 7.0 | 61250 | 3.2753 | 22.8911 | 4.8954 | 18.1204 | 19.1464 | | 2.7742 | 8.0 | 70000 | 3.2739 | 22.7829 | 4.8349 | 18.2465 | 19.2417 | ### Framework versions - Transformers 4.19.1 - Pytorch 1.7.0 - Datasets 2.2.1 - Tokenizers 0.12.1
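The card above reports ROUGE scores but no usage snippet; the following is a minimal summarization sketch. The input text and length limits are placeholders, not values from the card.

```python
# Hedged inference sketch; the text and length limits are placeholders.
from transformers import pipeline

summarizer = pipeline("summarization", model="Gergoe/t5-small-booksum-finetuned-booksum-test")
chapter = "Replace this string with the passage or chapter you want to summarize."
print(summarizer(chapter, max_length=128, min_length=32)[0]["summary_text"])
```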
actionpace/pegasus-samsum
8416f5498613011ddddaf404e1bc611d915fc9a0
2022-05-26T19:11:21.000Z
[ "pytorch", "tensorboard", "pegasus", "text2text-generation", "dataset:samsum", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
actionpace
null
actionpace/pegasus-samsum
2
null
transformers
26,141
--- tags: - generated_from_trainer datasets: - samsum model-index: - name: pegasus-samsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-samsum This model is a fine-tuned version of [google/pegasus-cnn_dailymail](https://huggingface.co/google/pegasus-cnn_dailymail) on the samsum dataset. It achieves the following results on the evaluation set: - Loss: 1.4841 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.7073 | 0.54 | 500 | 1.4841 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
kangela/Metaphor-FineTuned-BERT-5Epochs
67c49e531b92755c91ff20ef462477519983b9a6
2022-05-31T08:20:36.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
kangela
null
kangela/Metaphor-FineTuned-BERT-5Epochs
2
null
transformers
26,142
Entry not found
castorini/mdpr-tied-pft-msmarco-ft-all
7cf44df40bc9048163d2168b521783322b3eb531
2022-05-26T21:14:21.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
castorini
null
castorini/mdpr-tied-pft-msmarco-ft-all
2
null
transformers
26,143
This checkpoint is further fine-tuned from the `castorini/mdpr-tied-pft-msmarco` checkpoint on all of the Mr. TyDi training data.
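A hedged sketch of producing a dense query embedding with this checkpoint: taking the [CLS] vector follows common DPR practice and is an assumption here, not something stated in the card.

```python
# Hedged sketch: encode a query and take the [CLS] vector (an assumed pooling choice).
import torch
from transformers import AutoModel, AutoTokenizer

name = "castorini/mdpr-tied-pft-msmarco-ft-all"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModel.from_pretrained(name)

inputs = tokenizer("what is dense passage retrieval?", return_tensors="pt")
with torch.no_grad():
    query_embedding = model(**inputs).last_hidden_state[:, 0]  # [CLS] representation
print(query_embedding.shape)  # (1, hidden_size)
```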
nqcccccc/phobert-multilabel-post-classification
83a2a410ec95b27007ecf78151d57f52ecb4c7d7
2022-05-27T07:13:41.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
nqcccccc
null
nqcccccc/phobert-multilabel-post-classification
2
null
transformers
26,144
Entry not found
elisabethvonoswald/wav2vec2-large-xls-r-300m-27-05
27ba45388df4d2be32ce9fdd60f5c7ec8953e886
2022-05-27T13:38:55.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
elisabethvonoswald
null
elisabethvonoswald/wav2vec2-large-xls-r-300m-27-05
2
null
transformers
26,145
Entry not found
onewithnickelcoins/roberta-base-stars
1a77d0b1fbe148559253be7d0496e0bbe6511707
2022-05-27T13:15:43.000Z
[ "pytorch", "roberta", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
onewithnickelcoins
null
onewithnickelcoins/roberta-base-stars
2
null
transformers
26,146
--- license: mit tags: - generated_from_trainer metrics: - accuracy model-index: - name: roberta-base-stars results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-stars This model is a fine-tuned version of [onewithnickelcoins/roberta-base-MLM](https://huggingface.co/onewithnickelcoins/roberta-base-MLM) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.2914 - Accuracy: 0.6857 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: tpu - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.10.2+cu113 - Datasets 1.18.4 - Tokenizers 0.11.6
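The card above documents training hyperparameters but no inference snippet; here is a minimal sketch with the transformers text-classification pipeline. The example sentence is illustrative, and the returned label names come from the model's config rather than from this card.

```python
# Hedged inference sketch; the review text is illustrative.
from transformers import pipeline

classifier = pipeline("text-classification", model="onewithnickelcoins/roberta-base-stars")
print(classifier("The food was great but the service was slow."))
```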
SivilTaram/poet-sql
6b5bb6d30df335b5b8e21650f8d9ff7187159f5d
2022-05-27T13:29:58.000Z
[ "pytorch", "bart", "feature-extraction", "transformers", "license:apache-2.0" ]
feature-extraction
false
SivilTaram
null
SivilTaram/poet-sql
2
null
transformers
26,147
--- license: apache-2.0 ---
SivilTaram/poet-sql-digit
148416e998dffe2e997776490745b2138fa99b6f
2022-05-27T13:55:45.000Z
[ "pytorch", "bart", "feature-extraction", "transformers", "license:apache-2.0" ]
feature-extraction
false
SivilTaram
null
SivilTaram/poet-sql-digit
2
null
transformers
26,148
--- license: apache-2.0 ---
wuyue19871987/twitter-roberta-base-sentiment-finetuned
59b9fa8b4e2f725a878f4a42880a91da6c24e6c8
2022-05-28T02:32:12.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
wuyue19871987
null
wuyue19871987/twitter-roberta-base-sentiment-finetuned
2
null
transformers
26,149
Entry not found
KoichiYasuoka/deberta-base-coptic
287c0735a50ab79f1dae6b373849a55c2c97f000
2022-05-28T09:19:16.000Z
[ "pytorch", "deberta-v2", "fill-mask", "cop", "transformers", "coptic", "masked-lm", "license:cc-by-sa-4.0", "autotrain_compatible" ]
fill-mask
false
KoichiYasuoka
null
KoichiYasuoka/deberta-base-coptic
2
null
transformers
26,150
--- language: - "cop" tags: - "coptic" - "masked-lm" license: "cc-by-sa-4.0" pipeline_tag: "fill-mask" mask_token: "[MASK]" --- # deberta-base-coptic ## Model Description This is a DeBERTa(V2) model pre-trained on Coptic Scriptorium Corpora. You can fine-tune `deberta-base-coptic` for downstream tasks, such as [POS-tagging](https://huggingface.co/KoichiYasuoka/deberta-base-coptic-upos), dependency-parsing, and so on. ## How to Use ```py from transformers import AutoTokenizer,AutoModelForMaskedLM tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/deberta-base-coptic") model=AutoModelForMaskedLM.from_pretrained("KoichiYasuoka/deberta-base-coptic") ```
JuanForeroNeme/ES_UC_MODELO_NPL_E3_V1
db372a547b4d7d8cc06a8f79627d55767ccb8d18
2022-05-28T17:40:40.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
JuanForeroNeme
null
JuanForeroNeme/ES_UC_MODELO_NPL_E3_V1
2
null
transformers
26,151
Entry not found
JuanForeroNeme/ES_UC_MODELO_NPL_E3_V2
20f372a013a805a4ac0f47874696a9c1d341dcf9
2022-05-28T19:05:51.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
JuanForeroNeme
null
JuanForeroNeme/ES_UC_MODELO_NPL_E3_V2
2
null
transformers
26,152
**DELIVERABLE 3** * Magda Brigitte Baron * Juan Guillermo Forero Neme * Myriam Leguizamon Lopez * Diego Alexander Maca Garcia
GiordanoB/mbart-large-50-finetuned-summarization-V2
9c32cdb246c1c5686ec7f9002907a214d9b858eb
2022-05-29T00:51:55.000Z
[ "pytorch", "tensorboard", "mbart", "text2text-generation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
GiordanoB
null
GiordanoB/mbart-large-50-finetuned-summarization-V2
2
null
transformers
26,153
--- tags: - generated_from_trainer metrics: - rouge model-index: - name: mbart-large-50-finetuned-summarization-V2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-large-50-finetuned-summarization-V2 This model is a fine-tuned version of [facebook/mbart-large-50](https://huggingface.co/facebook/mbart-large-50) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.9183 - Rouge1: 50.0118 - Rouge2: 31.3168 - Rougel: 37.6392 - Rougelsum: 45.2287 - Gen Len: 102.3571 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | No log | 1.0 | 15 | 2.0228 | 51.9711 | 32.5963 | 39.9154 | 48.3431 | 134.6429 | | No log | 2.0 | 30 | 1.9410 | 48.2977 | 30.5942 | 35.9761 | 43.7634 | 92.0714 | | No log | 3.0 | 45 | 1.9183 | 50.0118 | 31.3168 | 37.6392 | 45.2287 | 102.3571 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
susghosh/bert-finetuned-squad
c6d467a0c29b97fcc2d3a0c5f23a06d68e3bca59
2022-05-29T14:55:08.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
susghosh
null
susghosh/bert-finetuned-squad
2
null
transformers
26,154
Entry not found
hunkim/model1
381f9fa48dce03be650ce0595c16a873473f4217
2022-05-29T09:29:36.000Z
[ "pytorch", "roberta", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
hunkim
null
hunkim/model1
2
null
sentence-transformers
26,155
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # Sung/model1 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Sung/model1') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('Sung/model1') model = AutoModel.from_pretrained('Sung/model1') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Sung/model1) ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
shafin/distilbert-base-uncased-finetuned-cust-similarity-1
e95c0c158c93a7f194d5b48b60317297953ce97a
2022-05-29T09:49:25.000Z
[ "pytorch", "distilbert", "feature-extraction", "sentence-transformers", "sentence-similarity" ]
sentence-similarity
false
shafin
null
shafin/distilbert-base-uncased-finetuned-cust-similarity-1
2
1
sentence-transformers
26,156
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # shafin/distilbert-base-uncased-finetuned-cust-similarity-1 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 32 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('shafin/distilbert-base-uncased-finetuned-cust-similarity-1') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=shafin/distilbert-base-uncased-finetuned-cust-similarity-1) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 4375 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.OnlineContrastiveLoss.OnlineContrastiveLoss` Parameters of the fit()-Method: ``` { "epochs": 15, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 3000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 768, 'out_features': 256, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) (3): Dense({'in_features': 256, 'out_features': 32, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
hunkim/sentence-transformer-klue
02257b1fdd65e61861456ece23700b1d14b79d32
2022-05-29T13:47:40.000Z
[ "pytorch", "roberta", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
hunkim
null
hunkim/sentence-transformer-klue
2
null
sentence-transformers
26,157
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # Sung/sentence-transformer-klue This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Sung/sentence-transformer-klue') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('Sung/sentence-transformer-klue') model = AutoModel.from_pretrained('Sung/sentence-transformer-klue') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Sung/sentence-transformer-klue) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 365 with parameters: ``` {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 4, "evaluation_steps": 1000, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 146, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
pujaburman30/autotrain-hi_ner_xlmr_large-924630372
0ba257d6993841f7deb338c616ca2f911499b864
2022-05-29T13:44:19.000Z
[ "pytorch", "xlm-roberta", "token-classification", "unk", "dataset:pujaburman30/autotrain-data-hi_ner_xlmr_large", "transformers", "autotrain", "co2_eq_emissions", "autotrain_compatible" ]
token-classification
false
pujaburman30
null
pujaburman30/autotrain-hi_ner_xlmr_large-924630372
2
null
transformers
26,158
--- tags: autotrain language: unk widget: - text: "I love AutoTrain 🤗" datasets: - pujaburman30/autotrain-data-hi_ner_xlmr_large co2_eq_emissions: 5.880084418778246 --- # Model Trained Using AutoTrain - Problem type: Entity Extraction - Model ID: 924630372 - CO2 Emissions (in grams): 5.880084418778246 ## Validation Metrics - Loss: 0.8206124901771545 - Accuracy: 0.7745009890307498 - Precision: 0.6042857142857143 - Recall: 0.6547987616099071 - F1: 0.6285289747399703 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/pujaburman30/autotrain-hi_ner_xlmr_large-924630372 ``` Or Python API: ``` from transformers import AutoModelForTokenClassification, AutoTokenizer model = AutoModelForTokenClassification.from_pretrained("pujaburman30/autotrain-hi_ner_xlmr_large-924630372", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("pujaburman30/autotrain-hi_ner_xlmr_large-924630372", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
xuio/sts-12ep
d47f823ff6ef4fc9b6304fe6bb25dfa9e3baf129
2022-05-29T15:00:01.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
xuio
null
xuio/sts-12ep
2
null
transformers
26,159
Entry not found
chi0/kobart-dial-sum
c015a43853bf9fc5ebd2f630826f971c0fd96131
2022-05-29T15:19:28.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
chi0
null
chi0/kobart-dial-sum
2
null
transformers
26,160
Entry not found
KFlash/bert-finetuned-squad
9f02cd755755ee3aaf8b14273ed862f0cda21ae7
2022-06-02T15:22:00.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
KFlash
null
KFlash/bert-finetuned-squad
2
null
transformers
26,161
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Tokenizers 0.12.1
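The SQuAD fine-tune above lists no evaluation results or usage example; a hedged sketch of extractive question answering with the `question-answering` pipeline follows, using the repo id from the record (the question/context pair is illustrative only).

```python
from transformers import pipeline

# Repo id taken from the record above; any SQuAD-style BERT checkpoint works the same way.
qa = pipeline("question-answering", model="KFlash/bert-finetuned-squad")

result = qa(
    question="What does an extractive QA model predict?",
    context="Extractive QA models predict a start and an end position inside the given context.",
)
print(result["answer"], round(result["score"], 3))
```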
jg/xlm-roberta-base-finetuned-panx-de
bccba5127ee0de2bf8398013594cf26a7327d4c3
2022-06-04T10:59:50.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
jg
null
jg/xlm-roberta-base-finetuned-panx-de
2
null
transformers
26,162
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8620945214069894 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1372 - F1: 0.8621 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2575 | 1.0 | 525 | 0.1621 | 0.8292 | | 0.1287 | 2.0 | 1050 | 0.1378 | 0.8526 | | 0.0831 | 3.0 | 1575 | 0.1372 | 0.8621 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
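For the PAN-X.de token-classification checkpoint above (F1 ≈ 0.862), a minimal usage sketch with the `token-classification` pipeline; the German example sentence is only an illustration.

```python
from transformers import pipeline

# aggregation_strategy="simple" merges word-piece predictions into whole entities.
ner = pipeline(
    "token-classification",
    model="jg/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",
)
print(ner("Angela Merkel besuchte das Goethe-Institut in Berlin."))
```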
GiordanoB/ptt5-base-portuguese-vocab-summarizacao-PTT-BR
bbeeacb3e4ae566bddafae79af1f09941b3fca5e
2022-05-30T17:33:31.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
GiordanoB
null
GiordanoB/ptt5-base-portuguese-vocab-summarizacao-PTT-BR
2
null
transformers
26,163
--- license: mit tags: - generated_from_trainer model-index: - name: ptt5-base-portuguese-vocab-summarizacao-PTT-BR results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ptt5-base-portuguese-vocab-summarizacao-PTT-BR This model is a fine-tuned version of [unicamp-dl/ptt5-base-portuguese-vocab](https://huggingface.co/unicamp-dl/ptt5-base-portuguese-vocab) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.6954 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 15 | 4.6282 | | No log | 2.0 | 30 | 3.9111 | | No log | 3.0 | 45 | 3.6954 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
veronica320/EPC_ADEPT_roberta-l_all
fb4f2a0e9da0e2221464aef4d726d48fa03cc16b
2022-05-30T01:20:15.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
veronica320
null
veronica320/EPC_ADEPT_roberta-l_all
2
null
transformers
26,164
Entry not found
veronica320/SPTE_roberta-large-mnli_all
b2491a0ad751db3d08cc67ef11274c99391f381a
2022-05-30T01:21:32.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
veronica320
null
veronica320/SPTE_roberta-large-mnli_all
2
null
transformers
26,165
Entry not found
stevemobs/deberta-base-combined-squad1-aqa-1epoch-and-newsqa-1epoch
f665410350005f09073dc8b729d240221ee33def
2022-05-30T09:12:36.000Z
[ "pytorch", "tensorboard", "deberta", "question-answering", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
stevemobs
null
stevemobs/deberta-base-combined-squad1-aqa-1epoch-and-newsqa-1epoch
2
null
transformers
26,166
--- license: mit tags: - generated_from_trainer model-index: - name: deberta-base-combined-squad1-aqa-1epoch-and-newsqa-1epoch results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-base-combined-squad1-aqa-1epoch-and-newsqa-1epoch This model is a fine-tuned version of [stevemobs/deberta-base-combined-squad1-aqa-1epoch](https://huggingface.co/stevemobs/deberta-base-combined-squad1-aqa-1epoch) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.6807 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 0.6654 | 1.0 | 17307 | 0.6807 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
xuio/roberta-sts12
1851713deafe7ad0c483852531ebacef685f3376
2022-05-30T04:08:14.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
xuio
null
xuio/roberta-sts12
2
null
transformers
26,167
Entry not found
hsuk/tiny-bert-sst2-distilled
039fc84b6a245581a5064a6dbf77fe91355b9061
2022-06-05T07:24:53.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
hsuk
null
hsuk/tiny-bert-sst2-distilled
2
null
transformers
26,168
Entry not found
bekirbakar/wav2vec2-large-xlsr-53-tr-fine-tuning-01
cc81c6405acb5366d6170d4bf3efab01d2130e52
2022-06-16T13:36:05.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
bekirbakar
null
bekirbakar/wav2vec2-large-xlsr-53-tr-fine-tuning-01
2
null
transformers
26,169
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xlsr-53-tr-fine-tuning-01 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xlsr-53-tr-fine-tuning-01 This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice dataset.
jkhan447/sarcasm-detection-Bert-base-uncased-CR
b2ec4d9ad85d85c5def7f822b26672f219bc677d
2022-05-30T15:02:31.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
jkhan447
null
jkhan447/sarcasm-detection-Bert-base-uncased-CR
2
null
transformers
26,170
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: sarcasm-detection-Bert-base-uncased-CR results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sarcasm-detection-Bert-base-uncased-CR This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.2057 - Accuracy: 0.7187 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
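The sarcasm-detection card above reports accuracy only; a hedged sketch of scoring a sentence with the `text-classification` pipeline follows. The label names depend on how the (unspecified) training data was encoded, so they are not guaranteed to be human-readable.

```python
from transformers import pipeline

# Repo id from the record above; returns a label/score pair per input.
clf = pipeline("text-classification", model="jkhan447/sarcasm-detection-Bert-base-uncased-CR")
print(clf("Oh great, another Monday. Exactly what I needed."))
```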
godwinh/distilbert-base-uncased-finetuned-clinc
4c37c3ff13a19b7e896b625d55d3b8864f3359a6
2022-05-30T15:44:26.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
godwinh
null
godwinh/distilbert-base-uncased-finetuned-clinc
2
null
transformers
26,171
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-clinc results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-clinc This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc dataset. It achieves the following results on the evaluation set: - Loss: 0.7721 - Accuracy: 0.9184 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 318 | 3.2890 | 0.7429 | | 3.7868 | 2.0 | 636 | 1.8756 | 0.8374 | | 3.7868 | 3.0 | 954 | 1.1571 | 0.8961 | | 1.6929 | 4.0 | 1272 | 0.8574 | 0.9132 | | 0.9057 | 5.0 | 1590 | 0.7721 | 0.9184 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Tokenizers 0.12.1
Splend1dchan/xtreme_s_xlsr_300m_mt5-small_minds14.en-US
df7dcdc0b1c71c634fdc9cbf7e2d1e5d209336a7
2022-05-30T12:33:15.000Z
[ "pytorch", "tensorboard", "wav2vec2", "en-US", "dataset:xtreme_s", "transformers", "minds14", "google/xtreme_s", "generated_from_trainer", "license:apache-2.0", "model-index" ]
null
false
Splend1dchan
null
Splend1dchan/xtreme_s_xlsr_300m_mt5-small_minds14.en-US
2
null
transformers
26,172
--- language: - en-US license: apache-2.0 tags: - minds14 - google/xtreme_s - generated_from_trainer datasets: - xtreme_s metrics: - f1 - accuracy model-index: - name: xtreme_s_xlsr_300m_mt5-small_minds14.en-US results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xtreme_s_xlsr_300m_mt5-small_minds14.en-US This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the GOOGLE/XTREME_S - MINDS14.EN-US dataset. It achieves the following results on the evaluation set: - Loss: 4.7321 - F1: 0.0154 - Accuracy: 0.0638 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 8 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 50.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------:|:--------:| | 2.6067 | 3.95 | 20 | 2.6501 | 0.0112 | 0.0851 | | 2.5614 | 7.95 | 40 | 2.8018 | 0.0133 | 0.0603 | | 2.2836 | 11.95 | 60 | 3.0786 | 0.0084 | 0.0603 | | 1.9597 | 15.95 | 80 | 3.2288 | 0.0126 | 0.0638 | | 1.5566 | 19.95 | 100 | 3.6934 | 0.0178 | 0.0567 | | 1.3168 | 23.95 | 120 | 3.9135 | 0.0150 | 0.0638 | | 1.0598 | 27.95 | 140 | 4.2618 | 0.0084 | 0.0603 | | 0.5721 | 31.95 | 160 | 3.7973 | 0.0354 | 0.0780 | | 0.4402 | 35.95 | 180 | 4.6233 | 0.0179 | 0.0638 | | 0.6113 | 39.95 | 200 | 4.6149 | 0.0208 | 0.0674 | | 0.3938 | 43.95 | 220 | 4.7886 | 0.0159 | 0.0638 | | 0.2473 | 47.95 | 240 | 4.7321 | 0.0154 | 0.0638 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
zdreiosis/bert-finetuned-sem_eval-english
900ee6cf8545751a3d1b53a8e30f48d3e9476be4
2022-05-31T02:36:16.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "3rd", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
zdreiosis
null
zdreiosis/bert-finetuned-sem_eval-english
2
null
transformers
26,173
--- license: apache-2.0 tags: - 3rd - generated_from_trainer metrics: - f1 - accuracy model-index: - name: bert-finetuned-sem_eval-english results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-sem_eval-english This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5536 - F1: 0.5455 - Roc Auc: 0.6968 - Accuracy: 0.1839 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 ### Training results ### Framework versions - Transformers 4.15.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.10.3
g8a9/bert-base-italian-cased_ami20
69caa4ec07bea9e93aefbe91cc3e729d81f00d21
2022-05-30T12:55:07.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
g8a9
null
g8a9/bert-base-italian-cased_ami20
2
null
transformers
26,174
Entry not found
Splend1dchan/xtreme_s_xlsr_300m_freeze_minds14.en-US
353d03e0ee0f78bdc44af81beee0c60ca76bce1e
2022-05-30T13:36:05.000Z
[ "pytorch", "tensorboard", "wav2vec2", "transformers" ]
null
false
Splend1dchan
null
Splend1dchan/xtreme_s_xlsr_300m_freeze_minds14.en-US
2
null
transformers
26,175
Entry not found
Splend1dchan/xtreme_s_xlsr_300m_nofreeze_minds14.en-US
d890b6d1943ef8f4c55d361e751db5fe423ca6f4
2022-05-30T15:41:00.000Z
[ "pytorch", "tensorboard", "wav2vec2", "transformers" ]
null
false
Splend1dchan
null
Splend1dchan/xtreme_s_xlsr_300m_nofreeze_minds14.en-US
2
null
transformers
26,176
Entry not found
joebobby/finetuning-sentiment-model-5000-samples3
ce0e5d599610cfd75715979a4db99fd6c09adb2c
2022-05-31T17:53:11.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
joebobby
null
joebobby/finetuning-sentiment-model-5000-samples3
2
null
transformers
26,177
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: finetuning-sentiment-model-5000-samples3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-5000-samples3 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
Jiexing/sparc_add_coref_t5_3b_order_0514_ckpt-5696
39b5026148ba2e7b38173612e3d41b25c760a363
2022-05-30T15:42:08.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Jiexing
null
Jiexing/sparc_add_coref_t5_3b_order_0514_ckpt-5696
2
null
transformers
26,178
Entry not found
Mikey8943/marian-finetuned-kde4-en-to-fr
8d2ccaa20dcf6cf1a9d621f6df21d4a67a6dd797
2022-05-30T17:16:08.000Z
[ "pytorch", "tensorboard", "marian", "text2text-generation", "dataset:kde4", "transformers", "translation", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
translation
false
Mikey8943
null
Mikey8943/marian-finetuned-kde4-en-to-fr
2
null
transformers
26,179
--- license: apache-2.0 tags: - translation - generated_from_trainer datasets: - kde4 metrics: - bleu model-index: - name: marian-finetuned-kde4-en-to-fr results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: kde4 type: kde4 args: en-fr metrics: - name: Bleu type: bleu value: 50.16950271131339 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # marian-finetuned-kde4-en-to-fr This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset. It achieves the following results on the evaluation set: - Loss: 0.9643 - Bleu: 50.1695 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.15.0 - Pytorch 1.11.0+cu113 - Datasets 1.17.0 - Tokenizers 0.10.3
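A short usage sketch for the English-to-French Marian fine-tune above (BLEU ≈ 50.17 on kde4); the input string is illustrative.

```python
from transformers import pipeline

# Marian checkpoints encode a fixed translation direction, here en -> fr.
translator = pipeline("translation_en_to_fr", model="Mikey8943/marian-finetuned-kde4-en-to-fr")
print(translator("Unable to import the requested file.")[0]["translation_text"])
```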
income/jpq-question_encoder-base-msmarco-distilbert-tas-b
d5e33cfc225d209ca94e395b0f22630d93542c17
2022-05-30T17:18:28.000Z
[ "pytorch", "distilbert", "transformers", "license:apache-2.0" ]
null
false
income
null
income/jpq-question_encoder-base-msmarco-distilbert-tas-b
2
null
transformers
26,180
--- license: apache-2.0 ---
income/jpq-document_encoder-base-msmarco-distilbert-tas-b
b963f87184025a9fa5217994f6fac65af2b108fe
2022-05-30T17:23:56.000Z
[ "pytorch", "distilbert", "transformers", "license:apache-2.0" ]
null
false
income
null
income/jpq-document_encoder-base-msmarco-distilbert-tas-b
2
null
transformers
26,181
--- license: apache-2.0 ---
kimcando/reg_trained
59ce88f0150635970aa450e2dead6bd5dc8dc13a
2022-05-30T17:25:15.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
kimcando
null
kimcando/reg_trained
2
null
transformers
26,182
Entry not found
ViktorDo/distilbert-base-uncased-scratch-powo_mgh_pt
b34f7f8e5aa284a53d750c02a75d3b4c250df71c
2022-05-30T18:37:13.000Z
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
fill-mask
false
ViktorDo
null
ViktorDo/distilbert-base-uncased-scratch-powo_mgh_pt
2
null
transformers
26,183
--- tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-scratch-powo_mgh_pt results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-scratch-powo_mgh_pt This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.0408 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 5 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 40 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 6.4584 | 0.2 | 200 | 4.7806 | | 4.6385 | 0.41 | 400 | 4.3704 | | 4.2219 | 0.61 | 600 | 4.0727 | | 3.994 | 0.81 | 800 | 3.8772 | | 3.8048 | 1.01 | 1000 | 3.6894 | | 3.6722 | 1.22 | 1200 | 3.5732 | | 3.4828 | 1.42 | 1400 | 3.4203 | | 3.3648 | 1.62 | 1600 | 3.3634 | | 3.3918 | 1.83 | 1800 | 3.2685 | | 3.3919 | 2.03 | 2000 | 3.2027 | | 3.1715 | 2.23 | 2200 | 3.1365 | | 3.0635 | 2.43 | 2400 | 3.1228 | | 3.0804 | 2.64 | 2600 | 3.0595 | | 3.0468 | 2.84 | 2800 | 3.0318 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
erfangc/marian-finetuned-kde4-en-to-fr
6a93d10ebfa40268802c7e949e7384ca9bf77da6
2022-05-31T01:44:37.000Z
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erfangc
null
erfangc/marian-finetuned-kde4-en-to-fr
2
null
transformers
26,184
Entry not found
Splend1dchan/xtreme_s_xlsr_300m_mt5-small_minds14.en-US_my
83409e59f974454e996f9880e23ae00b30bcbbb3
2022-05-31T03:59:50.000Z
[ "pytorch", "tensorboard", "wav2vec2", "en-US", "dataset:xtreme_s", "transformers", "minds14", "google/xtreme_s", "generated_from_trainer", "license:apache-2.0", "model-index" ]
null
false
Splend1dchan
null
Splend1dchan/xtreme_s_xlsr_300m_mt5-small_minds14.en-US_my
2
null
transformers
26,185
--- language: - en-US license: apache-2.0 tags: - minds14 - google/xtreme_s - generated_from_trainer datasets: - xtreme_s metrics: - f1 - accuracy model-index: - name: xtreme_s_xlsr_300m_minds14.en-US_2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xtreme_s_xlsr_300m_minds14.en-US_2 This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m)--concat-->mt5 on the GOOGLE/XTREME_S - MINDS14.EN-US dataset. It achieves the following results on the evaluation set: - Loss: 0.5685 - F1: 0.83333 - Accuracy: 0.83258 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 50.0 - mixed_precision_training: Native AMP ### Training results See TensorBoard ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
Jiexing/cosql_add_coref_t5_3b_order_0519_ckpt-2624
27132d3dd34c2b86752cc54894abb8dd3419f787
2022-05-31T02:23:05.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Jiexing
null
Jiexing/cosql_add_coref_t5_3b_order_0519_ckpt-2624
2
null
transformers
26,186
Entry not found
Splend1dchan/xtreme_s_w2v2_minds14.en-US
0b28a0567cf95e4ef81ed4b16cdfcb7ff4a4178a
2022-05-31T04:55:00.000Z
[ "pytorch", "tensorboard", "wav2vec2", "en-US", "dataset:xtreme_s", "transformers", "minds14", "google/xtreme_s", "generated_from_trainer", "license:apache-2.0", "model-index" ]
null
false
Splend1dchan
null
Splend1dchan/xtreme_s_w2v2_minds14.en-US
2
null
transformers
26,187
--- language: - en-US license: apache-2.0 tags: - minds14 - google/xtreme_s - generated_from_trainer datasets: - xtreme_s metrics: - f1 - accuracy model-index: - name: xtreme_s_w2v2_minds14.en-US results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xtreme_s_w2v2_minds14.en-US This model is a fine-tuned version of [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) on the GOOGLE/XTREME_S - MINDS14.EN-US dataset. It achieves the following results on the evaluation set: - Loss: 0.5337 - F1: 0.9144 - Accuracy: 0.9113 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 150.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy | |:-------------:|:------:|:----:|:---------------:|:------:|:--------:| | 2.6482 | 3.95 | 20 | 2.6421 | 0.0242 | 0.0745 | | 2.6292 | 7.95 | 40 | 2.6359 | 0.0108 | 0.0816 | | 2.5993 | 11.95 | 60 | 2.6301 | 0.0167 | 0.0674 | | 2.4023 | 15.95 | 80 | 2.5514 | 0.1105 | 0.1454 | | 1.4015 | 19.95 | 100 | 1.6843 | 0.5599 | 0.5851 | | 0.4379 | 23.95 | 120 | 0.8126 | 0.7921 | 0.7908 | | 0.0642 | 27.95 | 140 | 0.7178 | 0.8158 | 0.8156 | | 0.0376 | 31.95 | 160 | 0.7286 | 0.8473 | 0.8475 | | 0.0185 | 35.95 | 180 | 0.6779 | 0.8719 | 0.8723 | | 0.0752 | 39.95 | 200 | 0.7096 | 0.8578 | 0.8511 | | 0.0266 | 43.95 | 220 | 0.7655 | 0.8596 | 0.8546 | | 0.0078 | 47.95 | 240 | 0.7623 | 0.8563 | 0.8511 | | 0.007 | 51.95 | 260 | 0.6620 | 0.8794 | 0.8759 | | 0.0047 | 55.95 | 280 | 0.5936 | 0.9045 | 0.9007 | | 0.0067 | 59.95 | 300 | 0.8279 | 0.8546 | 0.8617 | | 0.0394 | 63.95 | 320 | 0.8766 | 0.8359 | 0.8227 | | 0.0051 | 67.95 | 340 | 0.8097 | 0.8483 | 0.8475 | | 0.0095 | 71.95 | 360 | 0.6095 | 0.9083 | 0.9078 | | 0.0026 | 75.95 | 380 | 0.5286 | 0.8889 | 0.8865 | | 0.0023 | 79.95 | 400 | 0.7218 | 0.8926 | 0.8936 | | 0.0023 | 83.95 | 420 | 0.6551 | 0.8997 | 0.8972 | | 0.0027 | 87.95 | 440 | 0.6664 | 0.8848 | 0.8794 | | 0.0019 | 91.95 | 460 | 0.5344 | 0.9032 | 0.9043 | | 0.002 | 95.95 | 480 | 0.5863 | 0.8983 | 0.9007 | | 0.0015 | 99.95 | 500 | 0.5715 | 0.9047 | 0.9043 | | 0.0016 | 103.95 | 520 | 0.5615 | 0.8956 | 0.8936 | | 0.0014 | 107.95 | 540 | 0.6353 | 0.8965 | 0.8936 | | 0.0014 | 111.95 | 560 | 0.5593 | 0.9041 | 0.9007 | | 0.0013 | 115.95 | 580 | 0.6041 | 0.8977 | 0.8936 | | 0.0013 | 119.95 | 600 | 0.5794 | 0.9026 | 0.9007 | | 0.0012 | 123.95 | 620 | 0.6858 | 0.9003 | 0.8972 | | 0.0013 | 127.95 | 640 | 0.6730 | 0.9002 | 0.8972 | | 0.0013 | 131.95 | 660 | 0.5707 | 0.9146 | 0.9113 | | 0.0012 | 135.95 | 680 | 0.5604 | 0.9153 | 0.9113 | | 0.0019 | 139.95 | 700 | 0.5468 | 0.9114 | 0.9078 | | 0.0015 | 143.95 | 720 | 0.5361 | 0.9144 | 0.9113 | | 0.0012 | 147.95 | 740 | 0.5337 | 0.9144 | 0.9113 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
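The MINDS-14 intent-classification card above reports F1/accuracy but no inference code. A sketch with the `audio-classification` pipeline is given below; it assumes the checkpoint exposes a sequence-classification head compatible with that pipeline and that the input is a 16 kHz mono recording (`example_utterance.wav` is a hypothetical file name).

```python
from transformers import pipeline

# Assumption: the fine-tuned wav2vec2 classifier loads under the audio-classification pipeline.
clf = pipeline("audio-classification", model="Splend1dchan/xtreme_s_w2v2_minds14.en-US")
print(clf("example_utterance.wav", top_k=3))
```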
Splend1dchan/xtreme_s_w2v2_t5lephone-small_minds14.en-US
51da1dd830dd45f5161947c66ebe07ec08ef1f77
2022-05-31T06:14:06.000Z
[ "pytorch", "tensorboard", "wav2vec2", "en-US", "dataset:xtreme_s", "transformers", "minds14", "google/xtreme_s", "generated_from_trainer", "license:apache-2.0", "model-index" ]
null
false
Splend1dchan
null
Splend1dchan/xtreme_s_w2v2_t5lephone-small_minds14.en-US
2
null
transformers
26,188
--- language: - en-US license: apache-2.0 tags: - minds14 - google/xtreme_s - generated_from_trainer datasets: - xtreme_s metrics: - f1 - accuracy model-index: - name: xtreme_s_w2v2_t5lephone-small_minds14.en-US results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xtreme_s_w2v2_t5lephone-small_minds14.en-US This model is a fine-tuned version of [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) on the GOOGLE/XTREME_S - MINDS14.EN-US dataset. It achieves the following results on the evaluation set: - Loss: 1.5203 - F1: 0.7526 - Accuracy: 0.7518 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 150.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy | |:-------------:|:------:|:----:|:---------------:|:------:|:--------:| | 2.589 | 3.95 | 20 | 2.6401 | 0.0108 | 0.0816 | | 2.5223 | 7.95 | 40 | 2.6493 | 0.0339 | 0.0816 | | 2.5085 | 11.95 | 60 | 2.6236 | 0.0539 | 0.1028 | | 2.1252 | 15.95 | 80 | 2.5006 | 0.1458 | 0.1667 | | 1.3711 | 19.95 | 100 | 2.2712 | 0.2344 | 0.2837 | | 1.5092 | 23.95 | 120 | 2.0599 | 0.3631 | 0.3936 | | 0.4962 | 27.95 | 140 | 1.8475 | 0.4881 | 0.4894 | | 0.4169 | 31.95 | 160 | 1.8262 | 0.5358 | 0.5142 | | 0.1579 | 35.95 | 180 | 1.6481 | 0.5967 | 0.6028 | | 0.0927 | 39.95 | 200 | 1.4470 | 0.6748 | 0.6560 | | 0.1363 | 43.95 | 220 | 1.2725 | 0.6836 | 0.6879 | | 0.1324 | 47.95 | 240 | 1.4330 | 0.6653 | 0.6702 | | 0.0294 | 51.95 | 260 | 1.2978 | 0.7079 | 0.7163 | | 0.0326 | 55.95 | 280 | 1.3869 | 0.6823 | 0.6879 | | 0.0444 | 59.95 | 300 | 1.5764 | 0.7051 | 0.6986 | | 0.0527 | 63.95 | 320 | 2.2013 | 0.5899 | 0.5851 | | 0.1542 | 67.95 | 340 | 1.5203 | 0.7053 | 0.6986 | | 0.0127 | 71.95 | 360 | 1.7149 | 0.7105 | 0.7128 | | 0.0105 | 75.95 | 380 | 1.2471 | 0.7853 | 0.7837 | | 0.009 | 79.95 | 400 | 1.5720 | 0.7065 | 0.7057 | | 0.0081 | 83.95 | 420 | 1.9395 | 0.6656 | 0.6702 | | 0.2345 | 87.95 | 440 | 1.5704 | 0.7408 | 0.7411 | | 0.0076 | 91.95 | 460 | 1.4706 | 0.7554 | 0.7589 | | 0.0064 | 95.95 | 480 | 1.5746 | 0.7491 | 0.7518 | | 0.3105 | 99.95 | 500 | 1.6824 | 0.7273 | 0.7376 | | 0.0058 | 103.95 | 520 | 1.3799 | 0.7474 | 0.7624 | | 0.0055 | 107.95 | 540 | 1.4086 | 0.7350 | 0.7518 | | 0.0051 | 111.95 | 560 | 1.2832 | 0.7874 | 0.7979 | | 0.0052 | 115.95 | 580 | 1.3474 | 0.7752 | 0.7801 | | 0.0046 | 119.95 | 600 | 1.6125 | 0.7451 | 0.7482 | | 0.0044 | 123.95 | 620 | 1.5927 | 0.7486 | 0.7518 | | 0.0044 | 127.95 | 640 | 1.5551 | 0.7487 | 0.7518 | | 0.0041 | 131.95 | 660 | 1.5117 | 0.7631 | 0.7660 | | 0.0041 | 135.95 | 680 | 1.5210 | 0.7577 | 0.7624 | | 0.0041 | 139.95 | 700 | 1.5145 | 0.7655 | 0.7660 | | 0.004 | 143.95 | 720 | 1.5053 | 0.7665 | 0.7660 | | 0.004 | 147.95 | 740 | 1.5203 | 0.7526 | 0.7518 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - 
Datasets 2.1.0 - Tokenizers 0.12.1
bdh240901/wav2vec2-large-xls-r-300m-vi-colab
68d771f4c3ef3bec8c355b729b890e7eba7fb571
2022-05-31T06:11:31.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
bdh240901
null
bdh240901/wav2vec2-large-xls-r-300m-vi-colab
2
null
transformers
26,189
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-vi-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-vi-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
PontifexMaximus/opus-mt-iir-en-finetuned-fa-to-en
4a607104849f79b6317bab04d4dfc6674eb9c405
2022-06-02T09:38:06.000Z
[ "pytorch", "tensorboard", "marian", "text2text-generation", "dataset:opus_infopankki", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
PontifexMaximus
null
PontifexMaximus/opus-mt-iir-en-finetuned-fa-to-en
2
null
transformers
26,190
--- license: apache-2.0 tags: - generated_from_trainer datasets: - opus_infopankki metrics: - bleu model-index: - name: opus-mt-iir-en-finetuned-fa-to-en results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: opus_infopankki type: opus_infopankki args: en-fa metrics: - name: Bleu type: bleu value: 36.687 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-iir-en-finetuned-fa-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-iir-en](https://huggingface.co/Helsinki-NLP/opus-mt-iir-en) on the opus_infopankki dataset. It achieves the following results on the evaluation set: - Loss: 1.0968 - Bleu: 36.687 - Gen Len: 16.039 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-06 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 3.1614 | 1.0 | 1509 | 2.8058 | 12.326 | 16.5467 | | 2.7235 | 2.0 | 3018 | 2.4178 | 15.6912 | 16.6396 | | 2.4839 | 3.0 | 4527 | 2.1905 | 18.1971 | 16.4884 | | 2.3044 | 4.0 | 6036 | 2.0272 | 20.197 | 16.4735 | | 2.1943 | 5.0 | 7545 | 1.9012 | 22.2265 | 16.4266 | | 2.0669 | 6.0 | 9054 | 1.7984 | 23.7711 | 16.353 | | 1.985 | 7.0 | 10563 | 1.7100 | 24.986 | 16.284 | | 1.9024 | 8.0 | 12072 | 1.6346 | 26.1758 | 16.217 | | 1.8484 | 9.0 | 13581 | 1.5692 | 27.2782 | 16.1924 | | 1.7761 | 10.0 | 15090 | 1.5111 | 28.2761 | 16.144 | | 1.733 | 11.0 | 16599 | 1.4599 | 29.2184 | 16.2438 | | 1.6772 | 12.0 | 18108 | 1.4150 | 30.0026 | 16.1949 | | 1.6297 | 13.0 | 19617 | 1.3743 | 30.7839 | 16.1565 | | 1.5918 | 14.0 | 21126 | 1.3370 | 31.4921 | 16.1323 | | 1.5548 | 15.0 | 22635 | 1.3038 | 32.0621 | 16.076 | | 1.5333 | 16.0 | 24144 | 1.2743 | 32.6881 | 16.0078 | | 1.5145 | 17.0 | 25653 | 1.2478 | 33.3794 | 16.1228 | | 1.4826 | 18.0 | 27162 | 1.2240 | 33.8335 | 16.0809 | | 1.4488 | 19.0 | 28671 | 1.2021 | 34.2819 | 16.0479 | | 1.4386 | 20.0 | 30180 | 1.1829 | 34.7206 | 16.0578 | | 1.4127 | 21.0 | 31689 | 1.1660 | 35.031 | 16.0717 | | 1.4089 | 22.0 | 33198 | 1.1510 | 35.4142 | 16.0391 | | 1.3922 | 23.0 | 34707 | 1.1380 | 35.6777 | 16.0461 | | 1.377 | 24.0 | 36216 | 1.1273 | 35.95 | 16.0569 | | 1.3598 | 25.0 | 37725 | 1.1175 | 36.2435 | 16.0426 | | 1.3515 | 26.0 | 39234 | 1.1097 | 36.4009 | 16.0247 | | 1.3441 | 27.0 | 40743 | 1.1042 | 36.4815 | 16.0447 | | 1.3412 | 28.0 | 42252 | 1.1001 | 36.6092 | 16.0489 | | 1.3527 | 29.0 | 43761 | 1.0976 | 36.6703 | 16.0383 | | 1.3397 | 30.0 | 45270 | 1.0968 | 36.687 | 16.039 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.7.1+cu110 - Datasets 2.2.2 - Tokenizers 0.12.1
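A minimal sketch for the Persian-to-English fine-tune above (BLEU ≈ 36.69 on opus_infopankki); the Persian sentence is illustrative only.

```python
from transformers import pipeline

# The base Marian model covers Iranian languages to English; this checkpoint was tuned on fa -> en.
translator = pipeline("translation", model="PontifexMaximus/opus-mt-iir-en-finetuned-fa-to-en")
print(translator("سلام، حال شما چطور است؟", max_length=64)[0]["translation_text"])
```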
theojolliffe/bart-cnn-science-v3-e3
5a20f2a8ea7616df4386cdd7aa792b65f29d6a0d
2022-05-31T08:34:03.000Z
[ "pytorch", "tensorboard", "bart", "text2text-generation", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
theojolliffe
null
theojolliffe/bart-cnn-science-v3-e3
2
null
transformers
26,191
--- license: mit tags: - generated_from_trainer metrics: - rouge model-index: - name: bart-cnn-science-v3-e3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-cnn-science-v3-e3 This model is a fine-tuned version of [theojolliffe/bart-cnn-science](https://huggingface.co/theojolliffe/bart-cnn-science) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8586 - Rouge1: 53.3497 - Rouge2: 34.0001 - Rougel: 35.6149 - Rougelsum: 50.5723 - Gen Len: 141.3519 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | No log | 1.0 | 398 | 0.9977 | 51.8104 | 31.5395 | 33.6887 | 49.2385 | 142.0 | | 1.1785 | 2.0 | 796 | 0.8875 | 53.7817 | 34.5394 | 35.9556 | 51.3317 | 141.537 | | 0.7376 | 3.0 | 1194 | 0.8586 | 53.3497 | 34.0001 | 35.6149 | 50.5723 | 141.3519 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
jkhan447/sarcasm-detection-xlnet-base-cased
8f73606f35d21007b31883b684d6157b909fc48e
2022-05-31T14:17:58.000Z
[ "pytorch", "tensorboard", "xlnet", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
jkhan447
null
jkhan447/sarcasm-detection-xlnet-base-cased
2
null
transformers
26,192
--- license: mit tags: - generated_from_trainer metrics: - accuracy model-index: - name: sarcasm-detection-xlnet-base-cased results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sarcasm-detection-xlnet-base-cased This model is a fine-tuned version of [xlnet-base-cased](https://huggingface.co/xlnet-base-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.1470 - Accuracy: 0.7117 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
Miss/vit-base-beans-demo-v5
6970d60f4438345ef3514a264b6ade0abad95073
2022-06-30T01:19:04.000Z
[ "pytorch", "tensorboard", "vit", "image-classification", "transformers" ]
image-classification
false
Miss
null
Miss/vit-base-beans-demo-v5
2
null
transformers
26,193
Entry not found
eetnawa/StereoKG-DT-SK
420761ed630b209dcae9ec61bb1cdd594bda1758
2022-05-31T10:35:19.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
false
eetnawa
null
eetnawa/StereoKG-DT-SK
2
null
transformers
26,194
--- license: mit ---
eetnawa/StereoKG-DT-UK
4fdf0f19f1ed2b88c9f3214c64a90b4a1518f5a9
2022-05-31T13:11:14.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
false
eetnawa
null
eetnawa/StereoKG-DT-UK
2
null
transformers
26,195
--- license: mit ---
PSW/samsum_reverse_train_min300_max1000_epoch6
5c0b71604f490744478e2fb0d871609bcf5b4b2e
2022-05-31T15:19:24.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_reverse_train_min300_max1000_epoch6
2
null
transformers
26,196
Entry not found
joaogante/test_img
99ff23f1c79ca06a9a95e75e7bd19b9531c2c20e
2022-05-31T15:44:12.000Z
[ "pytorch", "jax", "vit", "feature-extraction", "dataset:imagenet-21k", "arxiv:2010.11929", "arxiv:2006.03677", "transformers", "vision", "license:apache-2.0" ]
feature-extraction
false
joaogante
null
joaogante/test_img
2
null
transformers
26,197
--- license: apache-2.0 tags: - vision datasets: - imagenet-21k inference: false --- # Vision Transformer (base-sized model) Vision Transformer (ViT) model pre-trained on ImageNet-21k (14 million images, 21,843 classes) at resolution 224x224. It was introduced in the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Dosovitskiy et al. and first released in [this repository](https://github.com/google-research/vision_transformer). However, the weights were converted from the [timm repository](https://github.com/rwightman/pytorch-image-models) by Ross Wightman, who already converted the weights from JAX to PyTorch. Credits go to him. Disclaimer: The team releasing ViT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The Vision Transformer (ViT) is a transformer encoder model (BERT-like) pretrained on a large collection of images in a supervised fashion, namely ImageNet-21k, at a resolution of 224x224 pixels. Images are presented to the model as a sequence of fixed-size patches (resolution 16x16), which are linearly embedded. One also adds a [CLS] token to the beginning of a sequence to use it for classification tasks. One also adds absolute position embeddings before feeding the sequence to the layers of the Transformer encoder. Note that this model does not provide any fine-tuned heads, as these were zero'd by Google researchers. However, the model does include the pre-trained pooler, which can be used for downstream tasks (such as image classification). By pre-training the model, it learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled images for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. One typically places a linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of an entire image. ## Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=google/vit) to look for fine-tuned versions on a task that interests you. 
### How to use Here is how to use this model in PyTorch: ```python from transformers import ViTFeatureExtractor, ViTModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k') model = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ``` Here is how to use this model in JAX/Flax: ```python from transformers import ViTFeatureExtractor, FlaxViTModel from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k') model = FlaxViTModel.from_pretrained('google/vit-base-patch16-224-in21k') inputs = feature_extractor(images=image, return_tensors="np") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ``` ## Training data The ViT model was pretrained on [ImageNet-21k](http://www.image-net.org/), a dataset consisting of 14 million images and 21k classes. ## Training procedure ### Preprocessing The exact details of preprocessing of images during training/validation can be found [here](https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py). Images are resized/rescaled to the same resolution (224x224) and normalized across the RGB channels with mean (0.5, 0.5, 0.5) and standard deviation (0.5, 0.5, 0.5). ### Pretraining The model was trained on TPUv3 hardware (8 cores). All model variants are trained with a batch size of 4096 and learning rate warmup of 10k steps. For ImageNet, the authors found it beneficial to additionally apply gradient clipping at global norm 1. Pre-training resolution is 224. ## Evaluation results For evaluation results on several image classification benchmarks, we refer to tables 2 and 5 of the original paper. Note that for fine-tuning, the best results are obtained with a higher resolution (384x384). Of course, increasing the model size will result in better performance. ### BibTeX entry and citation info ```bibtex @misc{wu2020visual, title={Visual Transformers: Token-based Image Representation and Processing for Computer Vision}, author={Bichen Wu and Chenfeng Xu and Xiaoliang Dai and Alvin Wan and Peizhao Zhang and Zhicheng Yan and Masayoshi Tomizuka and Joseph Gonzalez and Kurt Keutzer and Peter Vajda}, year={2020}, eprint={2006.03677}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` ```bibtex @inproceedings{deng2009imagenet, title={Imagenet: A large-scale hierarchical image database}, author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li}, booktitle={2009 IEEE conference on computer vision and pattern recognition}, pages={248--255}, year={2009}, organization={Ieee} } ```
irenelizihui/bert-finetuned-squad
9c91eb47d632756dfca0360a7fda187ba624dd75
2022-06-01T02:53:33.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
irenelizihui
null
irenelizihui/bert-finetuned-squad
2
null
transformers
26,198
Entry not found
muhtasham/RoBERTa-tg
1bbba8c0f258fb487ca6ce285f139ad1bb900c6e
2022-06-01T07:52:30.000Z
[ "pytorch", "roberta", "fill-mask", "tg", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
fill-mask
false
muhtasham
null
muhtasham/RoBERTa-tg
2
1
transformers
26,199
--- language: - tg widget: - text: "Пойтахти <mask> Душанбе" - text: "<mask> ба ин сайти шумо медароям." - text: "Номи ман Акрам <mask>" tags: - generated_from_trainer model-index: - name: RoBERTa-tg results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # RoBERTa-tg This model was trained on the [Tajik-Corpus](https://huggingface.co/datasets/muhtasham/tajik-corpus) dataset, which is based on the Leipzig Corpora. ## Model description You can use the model for masked text generation or fine-tune it on a downstream task. ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 8 - seed: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Tokenizers 0.12.1
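A fill-mask usage sketch mirroring the widget examples in the card above:

```python
from transformers import pipeline

# Repo id and example sentence come directly from the card's widget section.
unmask = pipeline("fill-mask", model="muhtasham/RoBERTa-tg")
for candidate in unmask("Пойтахти <mask> Душанбе"):
    print(candidate["token_str"], round(candidate["score"], 3))
```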