Dataset schema (field: type, value range):

modelId: string (length 4-112)
sha: string (length 40)
lastModified: string (length 24)
tags: sequence
pipeline_tag: string (29 classes)
private: bool (1 class)
author: string (length 2-38)
config: null
id: string (length 4-112)
downloads: float64 (0 to 36.8M)
likes: float64 (0 to 712)
library_name: string (17 classes)
__index_level_0__: int64 (0 to 38.5k)
readme: string (length 0 to 186k)
Nadav/camembert-base-finetuned-on-runaways-fr
a110f5b1e593522b3d8ed450c31926924d7a0459
2022-06-19T14:24:02.000Z
[ "pytorch", "camembert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
Nadav
null
Nadav/camembert-base-finetuned-on-runaways-fr
3
null
transformers
22,600
Entry not found
Nadav/camembert-base-squad-finetuned-on-runaways-fr
d16dc75f9a7a7209b0df14b42026f9237cfd1256
2022-06-19T16:15:05.000Z
[ "pytorch", "camembert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
Nadav
null
Nadav/camembert-base-squad-finetuned-on-runaways-fr
3
null
transformers
22,601
Entry not found
Nonnyss/music-wav2vec2-th-finetune-mark2
a7f302fe6d705dabc5bfc46f981afd5de32a005b
2022-06-16T09:50:21.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
Nonnyss
null
Nonnyss/music-wav2vec2-th-finetune-mark2
3
null
transformers
22,602
Entry not found
S2312dal/M1_MLM
c737f41e3252e905e0df5c3688b5a986eec3820c
2022-06-16T15:54:27.000Z
[ "pytorch", "tensorboard", "albert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
S2312dal
null
S2312dal/M1_MLM
3
null
transformers
22,603
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: M1_MLM results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # M1_MLM This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.2887 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 8.2418 | 1.0 | 25 | 2.4870 | | 2.4653 | 2.0 | 50 | 2.3762 | | 2.2127 | 3.0 | 75 | 2.3000 | ### Framework versions - Transformers 4.19.4 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
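As context for the fill-mask entries such as the one above, a minimal usage sketch (untested; the example sentence is arbitrary) with the `transformers` pipeline API:

```python
# Sketch: querying the fill-mask head of S2312dal/M1_MLM (ALBERT-based).
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="S2312dal/M1_MLM")
for pred in fill_mask("The capital of France is [MASK]."):
    print(pred["token_str"], round(pred["score"], 4))
```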
anantoj/T5-summarizer-simple-wiki-v2
2d34e7bc1391f14a5c694fbd391c2abb5aeeb2c2
2022-06-16T16:44:54.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
anantoj
null
anantoj/T5-summarizer-simple-wiki-v2
3
null
transformers
22,604
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: T5-summarizer-simple-wiki-v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # T5-summarizer-simple-wiki-v2 This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0866 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 2.2575 | 1.0 | 14719 | 2.1173 | | 2.2663 | 2.0 | 29438 | 2.0926 | | 2.2092 | 3.0 | 44157 | 2.0866 | ### Framework versions - Transformers 4.19.4 - Pytorch 1.11.0 - Datasets 2.3.2 - Tokenizers 0.12.1
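For a text2text-generation checkpoint like the T5 summarizer above, a minimal sketch (untested; the input text is arbitrary and the `summarize:` prefix is an assumption based on standard T5 usage):

```python
# Sketch: summarizing a passage with the fine-tuned T5 checkpoint above.
from transformers import pipeline

gen = pipeline("text2text-generation", model="anantoj/T5-summarizer-simple-wiki-v2")
text = "The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France."
# The "summarize:" prefix follows the usual T5 convention; whether this
# particular checkpoint expects it is an assumption.
print(gen("summarize: " + text, max_length=60)[0]["generated_text"])
```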
loubnabnl/codeparrot-small-filtered-data
49b0db5f966ba7607ef5e6ce4a2d950bd3d5ca67
2022-06-17T12:32:08.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "license:apache-2.0" ]
text-generation
false
loubnabnl
null
loubnabnl/codeparrot-small-filtered-data
3
null
transformers
22,605
--- license: apache-2.0 ---
eslamxm/MBart-finetuned-ur-xlsum
651940a40484a975fb47985e0414b6a622df437d
2022-06-17T14:59:58.000Z
[ "pytorch", "tensorboard", "mbart", "text2text-generation", "dataset:xlsum", "transformers", "summarization", "ur", "seq2seq", "Abstractive Summarization", "generated_from_trainer", "model-index", "autotrain_compatible" ]
summarization
false
eslamxm
null
eslamxm/MBart-finetuned-ur-xlsum
3
null
transformers
22,606
--- tags: - summarization - ur - seq2seq - mbart - Abstractive Summarization - generated_from_trainer datasets: - xlsum model-index: - name: MBart-finetuned-ur-xlsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # MBart-finetuned-ur-xlsum This model is a fine-tuned version of [facebook/mbart-large-50](https://huggingface.co/facebook/mbart-large-50) on the xlsum dataset. It achieves the following results on the evaluation set: - Loss: 3.2663 - Rouge-1: 40.6 - Rouge-2: 18.9 - Rouge-l: 34.39 - Gen Len: 37.88 - Bertscore: 77.06 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 250 - num_epochs: 5 - label_smoothing_factor: 0.1 ### Training results ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
tuni/xlm-roberta-large-xnli-finetuned-mnli-SJP
d65bdc87bda4e4a3cef4cbb3515c67436d5673a0
2022-06-17T01:52:49.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "text-classification", "dataset:swiss_judgment_prediction", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
tuni
null
tuni/xlm-roberta-large-xnli-finetuned-mnli-SJP
3
null
transformers
22,607
--- license: mit tags: - generated_from_trainer datasets: - swiss_judgment_prediction metrics: - accuracy model-index: - name: xlm-roberta-large-xnli-finetuned-mnli-SJP results: - task: name: Text Classification type: text-classification dataset: name: swiss_judgment_prediction type: swiss_judgment_prediction args: all_languages metrics: - name: Accuracy type: accuracy value: 0.7957142857142857 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-large-xnli-finetuned-mnli-SJP This model is a fine-tuned version of [joeddav/xlm-roberta-large-xnli](https://huggingface.co/joeddav/xlm-roberta-large-xnli) on the swiss_judgment_prediction dataset. It achieves the following results on the evaluation set: - Loss: 1.3456 - Accuracy: 0.7957 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 5 | 1.8460 | 0.7956 | | No log | 2.0 | 10 | 1.3456 | 0.7957 | | No log | 3.0 | 15 | 1.2799 | 0.7957 | | No log | 4.0 | 20 | 1.2866 | 0.7957 | | No log | 5.0 | 25 | 1.3162 | 0.7956 | ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
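A minimal sketch (untested; the example sentence and the meaning of the output labels are assumptions) of scoring a judgment text with the text-classification checkpoint above:

```python
# Sketch: classifying a court-decision snippet with the fine-tuned XLM-R model.
from transformers import pipeline

clf = pipeline("text-classification",
               model="tuni/xlm-roberta-large-xnli-finetuned-mnli-SJP")
print(clf("Die Beschwerde wird abgewiesen."))  # returns [{"label": ..., "score": ...}]
```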
gciaffoni/modelLM
313390a009926498686f2b97416ef03bdbcd8614
2022-07-20T15:40:02.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
gciaffoni
null
gciaffoni/modelLM
3
null
transformers
22,608
R4 checkpoint-30000 LM (European Parliament)
hakurei/litv2-6B-rev1
5ef6fd5bb9844d66ca2327f82175cd3b68adf9a4
2022-06-17T04:22:48.000Z
[ "pytorch", "gptj", "text-generation", "transformers" ]
text-generation
false
hakurei
null
hakurei/litv2-6B-rev1
3
null
transformers
22,609
https://wandb.ai/haruu/mesh-transformer-jax/runs/68jerq7d?workspace=user-haruu
erickfm/rosy-sweep-3
0d9db01c9c71dd145e76a5d6dd1b7e6ad22a38a3
2022-06-17T07:54:58.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/rosy-sweep-3
3
null
transformers
22,610
Entry not found
powerwarez/kindword-model
02b37fd36aae65c2e4a9daf639a5652bdf16e56a
2022-06-17T11:11:54.000Z
[ "pytorch", "bert", "text-classification", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
powerwarez
null
powerwarez/kindword-model
3
null
transformers
22,611
--- tags: - generated_from_trainer model-index: - name: kindword-model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kindword-model This model is a fine-tuned version of [klue/bert-base](https://huggingface.co/klue/bert-base) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
EddieChen372/gpt2-jest
137722ae59766b5cc2eb5d8522091fe967a69189
2022-06-23T08:04:41.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
EddieChen372
null
EddieChen372/gpt2-jest
3
null
transformers
22,612
Entry not found
Nadav/robbert-base-finetuned-on-runaways-nl
0810b7e77ee8294bab8c15952ccc4481379d944f
2022-06-18T09:07:21.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
Nadav
null
Nadav/robbert-base-finetuned-on-runaways-nl
3
null
transformers
22,613
Entry not found
sgraf202/finetuning-sentiment-model-3000-samples
19a2289e851f3518cd71e3c150638d87783b0392
2022-07-11T10:57:24.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
sgraf202
null
sgraf202/finetuning-sentiment-model-3000-samples
3
null
transformers
22,614
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7404 - Accuracy: 0.4688 - F1: 0.5526 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
dominguesm/stt_pt_quartznet15x5_ctc_small
14fecbfd291ade80a3624d5c2399a30be2d6fe49
2022-06-26T01:05:06.000Z
[ "nemo", "pt", "dataset:mozilla-foundation/common_voice_9_0", "automatic-speech-recognition", "speech", "audio", "CTC", "QuartzNet", "Transformer", "NeMo", "pytorch", "license:cc-by-4.0", "model-index" ]
automatic-speech-recognition
false
dominguesm
null
dominguesm/stt_pt_quartznet15x5_ctc_small
3
2
nemo
22,615
--- language: - pt license: cc-by-4.0 library_name: nemo datasets: - mozilla-foundation/common_voice_9_0 thumbnail: null tags: - automatic-speech-recognition - speech - audio - CTC - QuartzNet - Transformer - NeMo - pytorch model-index: - name: stt_pt_quartznet15x5_ctc_small results: - task: type: automatic-speech-recognition dataset: type: common_voice name: Common Voice Portuguese config: clean split: test args: language: pt metrics: - type: wer value: 49.17 name: Test WER - type: cer value: 18.59 name: Test CER --- ## Model Overview This model transcribes speech in the lower-case Portuguese alphabet along with spaces. It is a "small" version of the QuartzNet-CTC model. ## NVIDIA NeMo: Training To train, fine-tune or play with the model you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed the latest PyTorch version. ``` pip install nemo_toolkit['all'] ``` ## How to Use this Model The model is available for use in the NeMo toolkit [1], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset. ### Automatically instantiate the model ```python import nemo.collections.asr as nemo_asr asr_model = nemo_asr.models.ASRModel.from_pretrained("dominguesm/stt_pt_quartznet15x5_ctc_small") ``` ### Transcribing using Python First, let's get a sample ``` wget https://github.com/DominguesM/stt_pt_quartznet15x5_ctc_small/raw/main/audios/common_voice_pt_25555332.mp3 ``` Then simply do: ``` asr_model.transcribe(['common_voice_pt_25555332.mp3']) ``` ### Transcribing many audio files ```shell python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py pretrained_name="dominguesm/stt_pt_quartznet15x5_ctc_small" audio_dir="<DIRECTORY CONTAINING AUDIO FILES>" ``` ### Input This model accepts 16 kHz mono-channel audio (wav files) as input. ### Output This model provides transcribed speech as a string for a given audio sample. ## Model Architecture This model is based on the QuartzNet architecture, which is a variant of Jasper that uses 1D time-channel separable convolutional layers in its convolutional residual blocks and is therefore smaller than Jasper models. QuartzNet models take in audio segments and transcribe them to letter, byte pair, or word piece sequences. ## Training All training scripts will be available at: [DominguesM/stt_pt_quartznet15x5_ctc_small](https://github.com/DominguesM/stt_pt_quartznet15x5_ctc_small) ### Datasets The model was trained with a part of the Common Voice 9.0 dataset in Portuguese, totaling 26 hours of audio.
* Mozilla Common Voice (v9.0) ## Performance | Metric | Score | | ------- | ----- | | WER | 49% | | CER | 18% | The metrics were obtained using the following code: **Attention**: The steps below must be performed after downloading the dataset (Mozilla Common Voice 9.0 PT) and following the audio pre-processing and `manifest` preparation steps in [`notebooks/Finetuning CTC model Portuguese.ipynb`](https://github.com/DominguesM/stt_pt_quartznet15x5_ctc_small) ```bash $ wget -P scripts/ "https://raw.githubusercontent.com/NVIDIA/NeMo/v1.9.0/examples/asr/speech_to_text_eval.py" $ wget -P scripts/ "https://raw.githubusercontent.com/NVIDIA/NeMo/v1.9.0/examples/asr/transcribe_speech.py" $ python scripts/speech_to_text_eval.py \ pretrained_name="dominguesm/stt_pt_quartznet15x5_ctc_small" \ dataset_manifest="manifests/pt/commonvoice_test_manifest_processed.json" \ output_filename="./evaluation_transcripts.json" \ batch_size=32 \ amp=true \ use_cer=false ``` ## Limitations Since this model was trained on publicly available speech datasets, the performance of this model might degrade for speech which includes technical terms, or vernacular that the model has not been trained on. The model might also perform worse for accented speech. ## Citation If you use our work, please cite: ```cite @misc{domingues2022quartznet15x15-small-portuguese, title={Fine-tuned {Quartznet}-15x5 CTC small model for speech recognition in {P}ortuguese}, author={Domingues, Maicon}, howpublished={\url{https://huggingface.co/dominguesm/stt_pt_quartznet15x5_ctc_small}}, year={2022} } ``` ## References [1] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)
huggingtweets/g2esports
107c777b4b085294c497eb44704f012c5bc34513
2022-06-19T18:55:40.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/g2esports
3
null
transformers
22,616
--- language: en thumbnail: http://www.huggingtweets.com/g2esports/1655664936018/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1531198610129428480/GoplyEsx_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">G2 Esports</div> <div style="text-align: center; font-size: 14px;">@g2esports</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from G2 Esports. | Data | G2 Esports | | --- | --- | | Tweets downloaded | 3250 | | Retweets | 342 | | Short tweets | 938 | | Tweets kept | 1970 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1h6b63sz/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @g2esports's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/724imy81) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/724imy81/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/g2esports') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
joshanashakya/codebert_sourcecode_nmt_pn2ja_100E_2e-05LR_16B_12E_12D
53e8ce99cf2be7383e3971e4c8acb27a9cce2df3
2022-06-20T03:42:08.000Z
[ "pytorch", "encoder-decoder", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
joshanashakya
null
joshanashakya/codebert_sourcecode_nmt_pn2ja_100E_2e-05LR_16B_12E_12D
3
null
transformers
22,617
Entry not found
anonsubms/lm_giga
a52bfc4563b0720fdb568ffdf0fd33a48fc6440a
2022-06-21T04:40:45.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
anonsubms
null
anonsubms/lm_giga
3
null
transformers
22,618
Entry not found
roshnir/xlmr-finetuned-mlqa-dev-cross-de-hi
48a6d1164fabbceb3dacbdf25e4d783454162680
2022-06-21T18:52:49.000Z
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/xlmr-finetuned-mlqa-dev-cross-de-hi
3
null
transformers
22,619
Entry not found
lmqg/bart-base-squadshifts-reddit
e5cdd82a07cdb96ae1014033214e6349d7f1033c
2022-06-22T10:48:50.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
lmqg
null
lmqg/bart-base-squadshifts-reddit
3
null
transformers
22,620
Entry not found
transZ/M2M_Vi_Ba
b5d39b2b5ccbfc35084db2cb692b7f0a5af959f6
2022-06-23T11:01:27.000Z
[ "pytorch", "m2m_100", "text2text-generation", "vi", "ba", "dataset:custom dataset", "transformers", "translation", "autotrain_compatible" ]
translation
false
transZ
null
transZ/M2M_Vi_Ba
3
null
transformers
22,621
--- language: - vi - ba tags: - translation datasets: - custom dataset metrics: - bleu - sacrebleu --- # How to run the model ```python from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer model = M2M100ForConditionalGeneration.from_pretrained("transZ/M2M_Vi_Ba") tokenizer = M2M100Tokenizer.from_pretrained("transZ/M2M_Vi_Ba") tokenizer.src_lang = "vi" vi_text = "Hôm nay ba đi chợ." encoded_vi = tokenizer(vi_text, return_tensors="pt") generated_tokens = model.generate(**encoded_vi, forced_bos_token_id=tokenizer.get_lang_id("ba")) translate = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0] print(translate) ```
munggok/infoxlm-large-squad
46a7dd085e6d5b74918c2064e2b0eb97906db89d
2022-06-22T23:44:33.000Z
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
munggok
null
munggok/infoxlm-large-squad
3
null
transformers
22,622
Entry not found
aico/TrOCR-MNIST
2e3f28ee86838a11811d0c17e4a85f0d981c95b7
2022-06-23T10:38:57.000Z
[ "pytorch", "vision-encoder-decoder", "transformers" ]
null
false
aico
null
aico/TrOCR-MNIST
3
null
transformers
22,623
Fine-tuned the ViT TrOCR model on the MNIST dataset; accuracy = 0.99525. Refs: http://yann.lecun.com/exdb/mnist/ https://github.com/microsoft/unilm/tree/master/trocr
Rahulrr/language_model_en_he
82e265712f0f6453f5e684815b657b4afb35358f
2022-06-24T05:31:17.000Z
[ "pytorch", "marian", "text2text-generation", "en", "he", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
false
Rahulrr
null
Rahulrr/language_model_en_he
3
null
transformers
22,624
--- language: - en - he tags: - translation license: apache-2.0 --- ### en-he * source group: English * target group: Hebrew * OPUS readme: [eng-heb](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-heb/README.md) * model: transformer-align * source language(s): eng * target language(s): heb * model: transformer-align * pre-processing: normalization + SentencePiece (spm32k,spm32k) * download original weights: [opus+bt-2021-04-13.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-heb/opus+bt-2021-04-13.zip) * test set translations: [opus+bt-2021-04-13.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-heb/opus+bt-2021-04-13.test.txt) * test set scores: [opus+bt-2021-04-13.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-heb/opus+bt-2021-04-13.eval.txt) ## Benchmarks | testset | BLEU | chr-F | #sent | #words | BP | |---------|-------|-------|-------|--------|----| | Tatoeba-test.eng-heb | 37.8 | 0.601 | 10000 | 60359 | 1.000 | ### System Info: - hf_name: en-he - source_languages: eng - target_languages: heb - opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-heb/README.md - original_repo: Tatoeba-Challenge - tags: ['translation'] - languages: ['en', 'he'] - src_constituents: ('English', {'eng'}) - tgt_constituents: ('Hebrew', {'heb'}) - src_multilingual: False - tgt_multilingual: False - long_pair: eng-heb - prepro: normalization + SentencePiece (spm32k,spm32k) - url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-heb/opus+bt-2021-04-13.zip - url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/eng-heb/opus+bt-2021-04-13.test.txt - src_alpha3: eng - tgt_alpha3: heb - chrF2_score: 0.601 - bleu: 37.8 - src_name: English - tgt_name: Hebrew - train_date: 2021-04-13 00:00:00 - src_alpha2: en - tgt_alpha2: he - prefer_old: False - short_pair: en-he - helsinki_git_sha: c4e978d8de47875b482653b423dcfe968979d7d5 - transformers_git_sha: 56b83cf049823ed074a655eceb28f31e2077c6eb - port_machine: LAPIN4GLQ2G3 - port_time: 2022-06-22-19:47
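The card above documents the OPUS en-he weights but includes no usage snippet; a minimal sketch (untested) using the Marian classes the tag list implies:

```python
# Sketch: English-to-Hebrew translation with the Marian checkpoint above.
from transformers import MarianMTModel, MarianTokenizer

name = "Rahulrr/language_model_en_he"
tokenizer = MarianTokenizer.from_pretrained(name)
model = MarianMTModel.from_pretrained(name)

batch = tokenizer(["How are you today?"], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```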
mohsenfayyaz/bert-base-parsbert-uncased_parsquad
b8753838f4efafb90873ff5e524634ff06f48801
2022-06-24T09:50:19.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
mohsenfayyaz
null
mohsenfayyaz/bert-base-parsbert-uncased_parsquad
3
null
transformers
22,625
Entry not found
Splend1dchan/t5lephone-small-textsquad
8d328502424903feb4aef85e0157ef44dcf0a934
2022-06-24T09:47:41.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Splend1dchan
null
Splend1dchan/t5lephone-small-textsquad
3
null
transformers
22,626
Entry not found
Servarr/bert-finetuned-radarr
86c3322b93aea497da160969fef732b08457ef4e
2022-06-24T16:40:53.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "dataset:movie_releases", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
Servarr
null
Servarr/bert-finetuned-radarr
3
null
transformers
22,627
--- license: apache-2.0 tags: - generated_from_trainer datasets: - movie_releases metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-radarr results: - task: name: Token Classification type: token-classification dataset: name: movie_releases type: movie_releases args: default metrics: - name: Precision type: precision value: 0.9555421444377389 - name: Recall type: recall value: 0.9638798701298701 - name: F1 type: f1 value: 0.9596928982725529 - name: Accuracy type: accuracy value: 0.9817602584524263 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-radarr This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the movie_releases dataset. It achieves the following results on the evaluation set: - Loss: 0.0731 - Precision: 0.9555 - Recall: 0.9639 - F1: 0.9597 - Accuracy: 0.9818 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0431 | 1.0 | 1191 | 0.1403 | 0.9436 | 0.9574 | 0.9504 | 0.9626 | | 0.0236 | 2.0 | 2382 | 0.0881 | 0.9485 | 0.9560 | 0.9522 | 0.9694 | | 0.0138 | 3.0 | 3573 | 0.0731 | 0.9555 | 0.9639 | 0.9597 | 0.9818 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
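A minimal sketch (untested; the release-name string is an illustrative guess at the kind of input the movie_releases dataset implies) of tagging tokens with the checkpoint above:

```python
# Sketch: token classification on a release name with Servarr/bert-finetuned-radarr.
from transformers import pipeline

ner = pipeline("token-classification",
               model="Servarr/bert-finetuned-radarr",
               aggregation_strategy="simple")
print(ner("Movie.Title.2022.1080p.BluRay.x264-GROUP"))
```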
mohsenfayyaz/albert-fa-base-v2_pquad_and_persian_qa
72fbfa8544039979e34e6f930965b95c59e87446
2022-06-24T12:19:42.000Z
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
mohsenfayyaz
null
mohsenfayyaz/albert-fa-base-v2_pquad_and_persian_qa
3
null
transformers
22,628
Entry not found
mohsenfayyaz/bert-base-parsbert-uncased_pquad_lr1e-5
8aa35d79be0344a5ae407047ff55475bfd9aef93
2022-06-24T13:06:50.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
mohsenfayyaz
null
mohsenfayyaz/bert-base-parsbert-uncased_pquad_lr1e-5
3
null
transformers
22,629
Entry not found
voidful/phoneme-longt5-global
ec8941a24d8bf89f826ce397b537ebe13bd61411
2022-06-25T04:36:55.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
voidful
null
voidful/phoneme-longt5-global
3
null
transformers
22,630
Entry not found
Chemsseddine/bert2gpt2_med_v2
f78142c0c70fa982258ce6d1d503597d0624cd7d
2022-06-30T19:53:14.000Z
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
Chemsseddine
null
Chemsseddine/bert2gpt2_med_v2
3
null
transformers
22,631
--- tags: - generated_from_trainer metrics: - rouge model-index: - name: bert2gpt2_med_v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> <img src="https://huggingface.co/Chemsseddine/bert2gpt2_med_ml_orange_summ-finetuned_med_sum_new-finetuned_med_sum_new/resolve/main/logobert2gpt2.png" alt="Map of positive probabilities per country." width="200"/> # bert2gpt2_med_v2 This model is a fine-tuned version of [Chemsseddine/bert2gpt2SUMM-finetuned-mlsum-finetuned-mlorange_sum](https://huggingface.co/Chemsseddine/bert2gpt2SUMM-finetuned-mlsum-finetuned-mlorange_sum) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0684 - Rouge1: 34.1248 - Rouge2: 17.7006 - Rougel: 33.4661 - Rougelsum: 33.4419 - Gen Len: 22.6429 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 2.9107 | 1.0 | 1000 | 2.0877 | 30.4547 | 14.4024 | 30.3642 | 30.3788 | 21.9714 | | 1.8782 | 2.0 | 2000 | 1.8151 | 32.6607 | 16.8089 | 32.3844 | 32.4762 | 21.7714 | | 1.291 | 3.0 | 3000 | 1.7523 | 33.6391 | 16.7866 | 32.4256 | 32.3306 | 22.7429 | | 0.819 | 4.0 | 4000 | 1.7650 | 35.0633 | 19.1222 | 34.4902 | 34.6796 | 22.4714 | | 0.4857 | 5.0 | 5000 | 1.8129 | 33.8763 | 16.9303 | 32.8845 | 32.9225 | 22.3857 | | 0.3232 | 6.0 | 6000 | 1.9339 | 33.9272 | 17.1784 | 32.9301 | 33.0253 | 22.4286 | | 0.2022 | 7.0 | 7000 | 1.9634 | 33.9869 | 16.4238 | 33.7336 | 33.65 | 22.6429 | | 0.1452 | 8.0 | 8000 | 2.0090 | 33.8892 | 18.2723 | 33.7514 | 33.6531 | 22.5714 | | 0.0845 | 9.0 | 9000 | 2.0337 | 33.9649 | 17.1339 | 33.5061 | 33.4157 | 22.7857 | | 0.0531 | 10.0 | 10000 | 2.0684 | 34.1248 | 17.7006 | 33.4661 | 33.4419 | 22.6429 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
mikesong724/deberta-wiki-2006
97086580b99643ca1400ade3e273dd48ad25af8b
2022-06-25T17:11:39.000Z
[ "pytorch", "deberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
mikesong724
null
mikesong724/deberta-wiki-2006
3
null
transformers
22,632
DeBERTa trained from scratch Source data: https://dumps.wikimedia.org/archive/2006/ Tools used: https://github.com/mikesong724/Point-in-Time-Language-Model 2006 wiki archive 2.7 GB trained 24 epochs = 65GB GLUE benchmark cola (3e): matthews corr: 0.2848 sst2 (3e): acc: 0.8876 mrpc (5e): F1: 0.8033, acc: 0.7108 stsb (3e): pearson: 0.7542, spearman: 0.7536 qqp (3e): acc: 0.8852, F1: 0.8461 mnli (3e): acc_mm: 0.7822 qnli (3e): acc: 0.8715 rte (3e): acc: 0.5235 wnli (5e): acc: 0.3099
haritzpuerto/xtremedistil-l6-h256-uncased-squad_1.1
f893dedca53ad8ca62c1a3746e82f9dcecef7500
2022-06-25T19:13:22.000Z
[ "pytorch", "bert", "question-answering", "en", "dataset:squad", "transformers", "QA", "Question Answering", "SQuAD", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
haritzpuerto
null
haritzpuerto/xtremedistil-l6-h256-uncased-squad_1.1
3
null
transformers
22,633
--- language: - en tags: - QA - Question Answering - SQuAD license: "mit" datasets: - squad metrics: - squad model-index: - name: xtremedistil-l6-h256-uncased results: - task: type: question-answering # Required. Example: automatic-speech-recognition name: Question Answering # Optional. Example: Speech Recognition dataset: type: squad # Required. Example: common_voice. Use dataset id from https://hf.co/datasets name: SQuAD # Required. A pretty name for the dataset. Example: Common Voice (French) split: validation # Optional. Example: test metrics: - type: squad # Required. Example: wer. Use metric id from https://hf.co/metrics value: 62.66792809839168 # Required. Example: 20.90 name: SQuAD EM # Optional. Example: Test WER config: exact_match # Optional. The name of the metric configuration used in `load_metric()`. Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`. See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations - type: squad # Required. Example: wer. Use metric id from https://hf.co/metrics value: 74.99490608582015 # Required. Example: 20.90 name: SQuAD F1 # Optional. Example: Test WER config: F1 --- microsoft/xtremedistil-l6-h256-uncased fine-tuned on SQuAD (https://huggingface.co/datasets/squad) Hyperparameters: - epochs: 1 - lr: 1e-5 - train batch size: 16 - optimizer: adamW - lr_scheduler: linear - num warmup steps: 0 - max_length: 512 Results on the dev set: - 'exact_match': 62.66792809839168 - 'f1': 74.99490608582015
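A minimal sketch (untested; the question and context are arbitrary examples) of extractive QA with the distilled checkpoint above:

```python
# Sketch: SQuAD-style question answering with the xtremedistil checkpoint above.
from transformers import pipeline

qa = pipeline("question-answering",
              model="haritzpuerto/xtremedistil-l6-h256-uncased-squad_1.1")
result = qa(question="Where is the Eiffel Tower located?",
            context="The Eiffel Tower is a wrought-iron lattice tower in Paris, France.")
print(result["answer"], result["score"])
```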
ipvikas/vit-demo
fc1d9e153faafc87946908454afd4796cb9e0887
2022-06-26T02:00:52.000Z
[ "pytorch", "vit", "image-classification", "transformers" ]
image-classification
false
ipvikas
null
ipvikas/vit-demo
3
null
transformers
22,634
Entry not found
hyan97/distilbert-base-uncased-finetuned-squad
d8f82ef19199e7e2119f387313630491257699f4
2022-06-26T05:55:35.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
hyan97
null
hyan97/distilbert-base-uncased-finetuned-squad
3
null
transformers
22,635
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.3517 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.2094 | 1.0 | 8235 | 1.2174 | | 0.9515 | 2.0 | 16470 | 1.1923 | | 0.7687 | 3.0 | 24705 | 1.3517 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Tokenizers 0.12.1
BitanBiswas/deepD
6ad3656ba96c0224a8760ff1186c3b3d9dcc2a53
2022-06-26T12:12:50.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
BitanBiswas
null
BitanBiswas/deepD
3
null
transformers
22,636
Entry not found
sudo-s/exper_batch_8_e4
5364fbcc570c2b5ee3cec3bfbab4ddd79709efa5
2022-06-26T15:33:41.000Z
[ "pytorch", "tensorboard", "vit", "image-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
image-classification
false
sudo-s
null
sudo-s/exper_batch_8_e4
3
null
transformers
22,637
--- license: apache-2.0 tags: - image-classification - generated_from_trainer metrics: - accuracy model-index: - name: exper_batch_8_e4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # exper_batch_8_e4 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the sudo-s/herbier_mesuem1 dataset. It achieves the following results on the evaluation set: - Loss: 0.3353 - Accuracy: 0.9183 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Apex, opt level O1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 4.2251 | 0.08 | 100 | 4.1508 | 0.1203 | | 3.4942 | 0.16 | 200 | 3.5566 | 0.2082 | | 3.2871 | 0.23 | 300 | 3.0942 | 0.3092 | | 2.7273 | 0.31 | 400 | 2.8338 | 0.3308 | | 2.4984 | 0.39 | 500 | 2.4860 | 0.4341 | | 2.3423 | 0.47 | 600 | 2.2201 | 0.4796 | | 1.8785 | 0.55 | 700 | 2.1890 | 0.4653 | | 1.8012 | 0.63 | 800 | 1.9901 | 0.4865 | | 1.7236 | 0.7 | 900 | 1.6821 | 0.5736 | | 1.4949 | 0.78 | 1000 | 1.5422 | 0.6083 | | 1.5573 | 0.86 | 1100 | 1.5436 | 0.6110 | | 1.3241 | 0.94 | 1200 | 1.4077 | 0.6207 | | 1.0773 | 1.02 | 1300 | 1.1417 | 0.6916 | | 0.7935 | 1.1 | 1400 | 1.1194 | 0.6931 | | 0.7677 | 1.17 | 1500 | 1.0727 | 0.7167 | | 0.9468 | 1.25 | 1600 | 1.0707 | 0.7136 | | 0.7563 | 1.33 | 1700 | 0.9427 | 0.7390 | | 0.8471 | 1.41 | 1800 | 0.8906 | 0.7571 | | 0.9998 | 1.49 | 1900 | 0.8098 | 0.7845 | | 0.6039 | 1.57 | 2000 | 0.7244 | 0.8034 | | 0.7052 | 1.64 | 2100 | 0.7881 | 0.7953 | | 0.6753 | 1.72 | 2200 | 0.7458 | 0.7926 | | 0.3758 | 1.8 | 2300 | 0.6987 | 0.8022 | | 0.4985 | 1.88 | 2400 | 0.6286 | 0.8265 | | 0.4122 | 1.96 | 2500 | 0.5949 | 0.8358 | | 0.1286 | 2.04 | 2600 | 0.5691 | 0.8385 | | 0.1989 | 2.11 | 2700 | 0.5535 | 0.8389 | | 0.3304 | 2.19 | 2800 | 0.5261 | 0.8520 | | 0.3415 | 2.27 | 2900 | 0.5504 | 0.8477 | | 0.4066 | 2.35 | 3000 | 0.5418 | 0.8497 | | 0.1208 | 2.43 | 3100 | 0.5156 | 0.8612 | | 0.1668 | 2.51 | 3200 | 0.5655 | 0.8539 | | 0.0727 | 2.58 | 3300 | 0.4971 | 0.8658 | | 0.0929 | 2.66 | 3400 | 0.4962 | 0.8635 | | 0.0678 | 2.74 | 3500 | 0.4903 | 0.8670 | | 0.1212 | 2.82 | 3600 | 0.4357 | 0.8867 | | 0.1579 | 2.9 | 3700 | 0.4642 | 0.8739 | | 0.2625 | 2.98 | 3800 | 0.3994 | 0.8951 | | 0.024 | 3.05 | 3900 | 0.3953 | 0.8971 | | 0.0696 | 3.13 | 4000 | 0.3883 | 0.9056 | | 0.0169 | 3.21 | 4100 | 0.3755 | 0.9086 | | 0.023 | 3.29 | 4200 | 0.3685 | 0.9109 | | 0.0337 | 3.37 | 4300 | 0.3623 | 0.9109 | | 0.0123 | 3.45 | 4400 | 0.3647 | 0.9067 | | 0.0159 | 3.52 | 4500 | 0.3630 | 0.9082 | | 0.0154 | 3.6 | 4600 | 0.3522 | 0.9094 | | 0.0112 | 3.68 | 4700 | 0.3439 | 0.9163 | | 0.0219 | 3.76 | 4800 | 0.3404 | 0.9194 | | 0.0183 | 3.84 | 4900 | 0.3371 | 0.9183 | | 0.0103 | 3.92 | 5000 | 0.3362 | 0.9183 | | 0.0357 | 3.99 | 5100 | 0.3353 | 0.9183 | ### Framework versions - Transformers 4.19.4 - Pytorch 1.5.1 - Datasets 2.3.2 - Tokenizers 0.12.1
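A minimal sketch (untested; the image path is a placeholder) of running the fine-tuned ViT classifier above:

```python
# Sketch: image classification with sudo-s/exper_batch_8_e4.
from transformers import pipeline

classifier = pipeline("image-classification", model="sudo-s/exper_batch_8_e4")
print(classifier("specimen.jpg"))  # "specimen.jpg" is a hypothetical local image file
```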
TheRensselaerIDEA/gpt2-large-covid-tweet-response
78af37e631c7e6c1b7fa85df3fbb40ecfc975fb0
2022-06-27T07:26:54.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "arxiv:2204.04353", "transformers", "license:mit" ]
text-generation
false
TheRensselaerIDEA
null
TheRensselaerIDEA/gpt2-large-covid-tweet-response
3
null
transformers
22,638
--- license: mit --- Base model: [gpt2-large](https://huggingface.co/gpt2-large) Fine-tuned to generate responses on a dataset of [COVID-19 public health tweets](https://github.com/TheRensselaerIDEA/generative-response-modeling). For more information about the dataset, task and training, see [our paper](https://arxiv.org/abs/2204.04353). This checkpoint corresponds to the lowest validation perplexity (3.36 at 2 epochs) seen during training. See Training metrics for Tensorboard logs. Also see: our [Vaccine public health tweet response model](https://huggingface.co/TheRensselaerIDEA/gpt2-large-vaccine-tweet-response). **Data input format:** <span style="color:red"><|message|></span>public health message<span style="color:red"><|author|></span>public health Twitter handle<span style="color:red"><|response|></span> Example: ```python from transformers import AutoTokenizer, AutoModelForCausalLM from transformers.trainer_utils import set_seed import torch tokenizer = AutoTokenizer.from_pretrained("TheRensselaerIDEA/gpt2-large-covid-tweet-response") model = AutoModelForCausalLM.from_pretrained("TheRensselaerIDEA/gpt2-large-covid-tweet-response") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) set_seed(33) message = "Is your child worried about #COVID19? Learn the facts so you can answer your children’s questions." author = "CDCgov" num_responses = 2 author_token, message_token, response_token = tokenizer.additional_special_tokens input_str = f"{message_token}{message}{author_token}{author}{response_token}" inputs = tokenizer(input_str, return_tensors="pt").to(device) responses_ids = model.generate(**inputs, max_new_tokens=100, pad_token_id=tokenizer.pad_token_id, do_sample=True, top_p=0.95, temperature=1.5, num_beams=3, early_stopping=True, num_return_sequences=num_responses) responses = [tokenizer.decode(r[inputs.input_ids.shape[-1]:], skip_special_tokens=True) for r in responses_ids] for i, resp in enumerate(responses): print(f"Response {i}: {resp}\n") ``` Output: ``` Response 0: @CDCgov I'm not worried. I don't know who needs to hear this, but I have a feeling I know who will be listening. It is not the virus. It is the media. I know you and CDC have been lying for months now, but the media will keep pushing this lie. Response 1: #WashYourHands to help #StopTheSpread of #COVID19 and other diseases. Learn more about hand washing: #HandWashing ```
Shanny/bert-finetuned-squad
c513aa02990cea43f67b1c1abf8afdf7c28d766d
2022-06-28T10:07:41.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Shanny
null
Shanny/bert-finetuned-squad
3
null
transformers
22,639
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
vinayak361/token_final_tunned
02316bc6f7fb360a7417461891dca8d32e57e672
2022-07-05T07:54:14.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
vinayak361
null
vinayak361/token_final_tunned
3
null
transformers
22,640
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: token_final_tunned results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # token_final_tunned This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4670 - Precision: 0.8269 - Recall: 0.8442 - F1: 0.8355 - Accuracy: 0.8516 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 108 | 0.7286 | 0.6581 | 0.7117 | 0.6838 | 0.7272 | | No log | 2.0 | 216 | 0.5497 | 0.7529 | 0.7823 | 0.7673 | 0.8053 | | No log | 3.0 | 324 | 0.4884 | 0.7911 | 0.8145 | 0.8026 | 0.8277 | | No log | 4.0 | 432 | 0.4723 | 0.8144 | 0.8278 | 0.8210 | 0.8408 | | 0.6038 | 5.0 | 540 | 0.4597 | 0.8032 | 0.8315 | 0.8171 | 0.8428 | | 0.6038 | 6.0 | 648 | 0.4583 | 0.8208 | 0.8322 | 0.8264 | 0.8480 | | 0.6038 | 7.0 | 756 | 0.4641 | 0.8290 | 0.8442 | 0.8365 | 0.8520 | | 0.6038 | 8.0 | 864 | 0.4670 | 0.8269 | 0.8442 | 0.8355 | 0.8516 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu102 - Datasets 2.2.2 - Tokenizers 0.12.1
egumasa/roberta-base-finetuned-academic
7945b0138af5601f54453ce34139795307dad627
2022-06-28T05:06:29.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "dataset:elsevier-oa-cc-by", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
fill-mask
false
egumasa
null
egumasa/roberta-base-finetuned-academic
3
null
transformers
22,641
--- license: mit tags: - generated_from_trainer datasets: - elsevier-oa-cc-by model-index: - name: roberta-base-finetuned-academic results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-finetuned-academic This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the elsevier-oa-cc-by dataset. It achieves the following results on the evaluation set: - Loss: 2.1158 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 2.1903 | 0.25 | 1025 | 2.0998 | | 2.1752 | 0.5 | 2050 | 2.1186 | | 2.1864 | 0.75 | 3075 | 2.1073 | | 2.1874 | 1.0 | 4100 | 2.1177 | | 2.1669 | 1.25 | 5125 | 2.1091 | | 2.1859 | 1.5 | 6150 | 2.1212 | | 2.1783 | 1.75 | 7175 | 2.1096 | | 2.1734 | 2.0 | 8200 | 2.0998 | | 2.1712 | 2.25 | 9225 | 2.0972 | | 2.1812 | 2.5 | 10250 | 2.1051 | | 2.1811 | 2.75 | 11275 | 2.1150 | | 2.1826 | 3.0 | 12300 | 2.1097 | | 2.172 | 3.25 | 13325 | 2.1115 | | 2.1745 | 3.5 | 14350 | 2.1098 | | 2.1758 | 3.75 | 15375 | 2.1101 | | 2.1834 | 4.0 | 16400 | 2.1232 | | 2.1836 | 4.25 | 17425 | 2.1052 | | 2.1791 | 4.5 | 18450 | 2.1186 | | 2.172 | 4.75 | 19475 | 2.1039 | | 2.1797 | 5.0 | 20500 | 2.1015 | ### Framework versions - Transformers 4.19.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
chisun/mt5-small-finetuned-amazon-en-es-accelerate
1a2a07fd001c3011eb859c77c7beafa65f8eb395
2022-06-27T07:52:26.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
chisun
null
chisun/mt5-small-finetuned-amazon-en-es-accelerate
3
null
transformers
22,642
Entry not found
YuanWellspring/wav2vec2-nsc-final_1-google-colab
bcd4f0afd409c37eff52452d795afe1e180dc1c9
2022-06-27T09:21:32.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "model-index" ]
automatic-speech-recognition
false
YuanWellspring
null
YuanWellspring/wav2vec2-nsc-final_1-google-colab
3
null
transformers
22,643
--- tags: - generated_from_trainer model-index: - name: wav2vec2-nsc-final_1-google-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-nsc-final_1-google-colab This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.10.3
PSW/xsum_samsum_threshold0.25_epoch3
5703b74bda2cb70fb3c80b20c404cf62f4bc7f69
2022-06-27T16:03:29.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/xsum_samsum_threshold0.25_epoch3
3
null
transformers
22,644
Entry not found
fvancesco/tmp_date
ca35035fa0be7785c87a4f1e52e980d7eff0dd0a
2022-06-27T23:47:48.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
false
fvancesco
null
fvancesco/tmp_date
3
null
transformers
22,645
--- license: mit ---
Adars/bert-base-cased-finetuned-squad
d4a1dd98ab1144cb926ca2f0c20814d06e690e1a
2022-06-28T16:15:44.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
Adars
null
Adars/bert-base-cased-finetuned-squad
3
null
transformers
22,646
Entry not found
sumitrsch/muril_large_multiconer22_bn
ba06a04068ff99e43882024a2fbe060fa7056a69
2022-06-30T12:39:24.000Z
[ "pytorch", "bert", "token-classification", "transformers", "license:afl-3.0", "autotrain_compatible" ]
token-classification
false
sumitrsch
null
sumitrsch/muril_large_multiconer22_bn
3
2
transformers
22,647
--- license: afl-3.0 ---
Akihiro2/mt5-small-finetuned-amazon-en-es
8d446a3ec88beb9eb0d07ff53dd7d40380f44b0a
2022-07-14T03:35:54.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Akihiro2
null
Akihiro2/mt5-small-finetuned-amazon-en-es
3
null
transformers
22,648
Entry not found
roshnir/mBert-finetuned-mlqa-dev-samelen-en-hi
b4f4a1b981e647bb63b58b04cd0c30fb1546eecb
2022-06-28T19:56:22.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
roshnir
null
roshnir/mBert-finetuned-mlqa-dev-samelen-en-hi
3
null
transformers
22,649
Entry not found
RuiqianLi/wav2vec2-large-960h-lv60-self-4-gram_fine-tune_real_29_Jun
03d2c1b4ba74b4607b9c39e183c0b364f4ce96f3
2022-06-29T08:44:53.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:uob_singlish", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
RuiqianLi
null
RuiqianLi/wav2vec2-large-960h-lv60-self-4-gram_fine-tune_real_29_Jun
3
null
transformers
22,650
--- license: apache-2.0 tags: - generated_from_trainer datasets: - uob_singlish model-index: - name: wav2vec2-large-960h-lv60-self-4-gram_fine-tune_real_29_Jun results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-960h-lv60-self-4-gram_fine-tune_real_29_Jun This model is a fine-tuned version of [facebook/wav2vec2-large-960h-lv60-self](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self) on the uob_singlish dataset. It achieves the following results on the evaluation set: - Loss: 1.2895 - Wer: 0.4583 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 2.1283 | 1.82 | 20 | 1.5236 | 0.5764 | | 1.3015 | 3.64 | 40 | 1.2956 | 0.4931 | | 0.9918 | 5.45 | 60 | 1.3087 | 0.5347 | | 0.849 | 7.27 | 80 | 1.2914 | 0.5139 | | 0.6191 | 9.09 | 100 | 1.2895 | 0.4583 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
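A minimal sketch (untested; the audio path is a placeholder, and whether the pipeline applies the 4-gram LM mentioned in the repo name is an assumption not checked here) of transcribing audio with the checkpoint above:

```python
# Sketch: speech-to-text with the fine-tuned wav2vec2 checkpoint above.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition",
               model="RuiqianLi/wav2vec2-large-960h-lv60-self-4-gram_fine-tune_real_29_Jun")
print(asr("sample_singlish.wav")["text"])  # hypothetical 16 kHz wav file
```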
coolzhao/xlm-roberta-base-finetuned-panx-de
15e178f8aff0d73797e91da1c3c00f44f8d6e0a2
2022-06-29T07:14:20.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
coolzhao
null
coolzhao/xlm-roberta-base-finetuned-panx-de
3
null
transformers
22,651
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8600306626540231 ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1356
- F1: 0.8600

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2525 | 1.0 | 525 | 0.1673 | 0.8294 |
| 0.1298 | 2.0 | 1050 | 0.1381 | 0.8510 |
| 0.0839 | 3.0 | 1575 | 0.1356 | 0.8600 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.11.0
- Datasets 1.16.1
- Tokenizers 0.10.3
ss756/bert-base-cased-finetuned-squad
4ee4277eb72c1a6ac4717e22011a68a81f53d6ca
2022-07-04T10:21:07.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
ss756
null
ss756/bert-base-cased-finetuned-squad
3
null
transformers
22,652
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-cased-finetuned-squad results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-base-cased-finetuned-squad

This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0081

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.0071 | 1.0 | 22183 | 1.0081 |

### Framework versions

- Transformers 4.18.0
- Pytorch 1.9.0+cu111
- Datasets 2.1.0
- Tokenizers 0.12.1
yaakov/demo-transfer-learning
b37bacf13e08912efdc933d01ea70c9fce639e90
2022-06-29T13:59:55.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
yaakov
null
yaakov/demo-transfer-learning
3
null
transformers
22,653
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy - f1 model-index: - name: demo-transfer-learning results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: mrpc metrics: - name: Accuracy type: accuracy value: 0.8553921568627451 - name: F1 type: f1 value: 0.8991452991452993 ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# demo-transfer-learning

This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6183
- Accuracy: 0.8554
- F1: 0.8991

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| No log | 1.0 | 459 | 0.3771 | 0.8358 | 0.8784 |
| 0.5168 | 2.0 | 918 | 0.4530 | 0.8578 | 0.9033 |
| 0.3018 | 3.0 | 1377 | 0.6183 | 0.8554 | 0.8991 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
okite97/distilbert-base-uncased-finetuned-zindi_tweets
2c5b7242b5317862dd95e0a020ff2abbb8338bf4
2022-06-29T15:36:27.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
okite97
null
okite97/distilbert-base-uncased-finetuned-zindi_tweets
3
null
transformers
22,654
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-zindi_tweets results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-zindi_tweets

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3203
- Accuracy: 0.9168
- F1: 0.9168

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.4224 | 1.0 | 67 | 0.2924 | 0.8894 | 0.8893 |
| 0.2096 | 2.0 | 134 | 0.2632 | 0.9055 | 0.9055 |
| 0.1329 | 3.0 | 201 | 0.2744 | 0.9102 | 0.9101 |
| 0.1016 | 4.0 | 268 | 0.2868 | 0.9055 | 0.9054 |
| 0.0752 | 5.0 | 335 | 0.2896 | 0.9140 | 0.9140 |
| 0.0454 | 6.0 | 402 | 0.3077 | 0.9178 | 0.9178 |
| 0.0305 | 7.0 | 469 | 0.3185 | 0.9149 | 0.9149 |
| 0.0298 | 8.0 | 536 | 0.3203 | 0.9168 | 0.9168 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
ghadeermobasher/BioRed-CD-Modified-PubMedBERT-512
6c163c2a9b1c6759027e4a5cb2124d29b457b2bb
2022-06-29T17:46:02.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-CD-Modified-PubMedBERT-512
3
null
transformers
22,655
Entry not found
ghadeermobasher/BioRed-Chem-Modified-PubMedBERT-512
10f147ca2745f1d86bac0b990eab9a90d635c9f1
2022-06-29T19:23:49.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-Chem-Modified-PubMedBERT-512
3
null
transformers
22,656
Entry not found
ghadeermobasher/BioRed-CD-Original-PubMedBERT-512
75e162b89721b01ed5381b63cebf4a285ea9bb44
2022-06-29T18:09:06.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-CD-Original-PubMedBERT-512
3
null
transformers
22,657
Entry not found
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-512
57488196e41644ff9e5889e33f6c3be8d9a9d7d7
2022-06-29T19:29:35.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/BioRed-Chem-Original-PubMedBERT-512
3
null
transformers
22,658
Entry not found
edbeeching/decision-transformer-gym-hopper-expert-new
44a9b5ee3175c7c6af660c060e77e5b11f2d6f93
2022-06-29T19:11:44.000Z
[ "pytorch", "decision_transformer", "feature-extraction", "transformers" ]
feature-extraction
false
edbeeching
null
edbeeching/decision-transformer-gym-hopper-expert-new
3
null
transformers
22,659
Entry not found
Evelyn18/distilbert-base-uncased-becas-2
dc861b2b7763a70a370ac12d3f88b01836367ddf
2022-07-02T02:50:26.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:becasv2", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Evelyn18
null
Evelyn18/distilbert-base-uncased-becas-2
3
null
transformers
22,660
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becas-2 results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-becas-2

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset.
It achieves the following results on the evaluation set:
- Loss: 5.9506

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.1
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 5 | 5.9506 |
| No log | 2.0 | 10 | 5.9506 |
| No log | 3.0 | 15 | 5.9506 |
| No log | 4.0 | 20 | 5.9506 |
| No log | 5.0 | 25 | 5.9506 |
| No log | 6.0 | 30 | 5.9506 |
| No log | 7.0 | 35 | 5.9506 |
| No log | 8.0 | 40 | 5.9506 |
| No log | 9.0 | 45 | 5.9506 |
| No log | 10.0 | 50 | 5.9506 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
skpawar1305/wav2vec2-large-xlsr-53-german-finetuned-ks-de
a35a3ef1de356eee68ab1c2c5813655433be72e6
2022-06-30T02:18:47.000Z
[ "pytorch", "tensorboard", "wav2vec2", "audio-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
audio-classification
false
skpawar1305
null
skpawar1305/wav2vec2-large-xlsr-53-german-finetuned-ks-de
3
null
transformers
22,661
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: wav2vec2-large-xlsr-53-german-finetuned-ks-de results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-large-xlsr-53-german-finetuned-ks-de

This model is a fine-tuned version of [jonatasgrosman/wav2vec2-large-xlsr-53-german](https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-german) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.8681
- Accuracy: 0.6667

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log | 1.0 | 1 | 1.9490 | 0.0833 |
| No log | 2.0 | 2 | 1.9128 | 0.25 |
| No log | 3.0 | 3 | 1.8861 | 0.5833 |
| No log | 4.0 | 4 | 1.8681 | 0.6667 |
| No log | 5.0 | 5 | 1.8590 | 0.6667 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
andrescoro/my-three-class-sentiment-classification-RoBERTa
819ff8447c524f354a2953c0214bc60cd1170109
2022-06-30T02:24:14.000Z
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
false
andrescoro
null
andrescoro/my-three-class-sentiment-classification-RoBERTa
3
null
transformers
22,662
Entry not found
shahma/finetuned-bert-mrpc
653009a0dcfb304d5962633a29d7e746ba9ff01f
2022-06-30T16:00:47.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
shahma
null
shahma/finetuned-bert-mrpc
3
null
transformers
22,663
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy - f1 model-index: - name: finetuned-bert-mrpc results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: mrpc metrics: - name: Accuracy type: accuracy value: 0.8602941176470589 - name: F1 type: f1 value: 0.9032258064516129 ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned-bert-mrpc

This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4266
- Accuracy: 0.8603
- F1: 0.9032

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.5475 | 1.0 | 230 | 0.4024 | 0.8211 | 0.8785 |
| 0.3309 | 2.0 | 460 | 0.3702 | 0.8529 | 0.8986 |
| 0.1716 | 3.0 | 690 | 0.4266 | 0.8603 | 0.9032 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.12.0+cu102
- Datasets 2.3.2
- Tokenizers 0.12.1
SivilTaram/tapex-t5-small-lm-adapt
8a72ac33ba541513cab2960d168ec28f89f84b8a
2022-06-30T08:49:07.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "license:mit", "autotrain_compatible" ]
text2text-generation
false
SivilTaram
null
SivilTaram/tapex-t5-small-lm-adapt
3
null
transformers
22,664
--- license: mit ---
SivilTaram/tapex-t5-large-finetuned-wtq
b750d9aa334ff274aeff7397490ddc2d21b5664a
2022-06-30T09:04:53.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "license:mit", "autotrain_compatible" ]
text2text-generation
false
SivilTaram
null
SivilTaram/tapex-t5-large-finetuned-wtq
3
null
transformers
22,665
--- license: mit ---
asahi417/lmqg-mbart-large-cc25-itquad
fdb21e9cd1e264cebace34822bbb4e6f7264f7c9
2022-06-30T14:10:06.000Z
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
asahi417
null
asahi417/lmqg-mbart-large-cc25-itquad
3
null
transformers
22,666
Entry not found
yaakov/test-distilbert-to-cola
e1b062bd9b3375d8a4b4738e0c863359a0ed2686
2022-06-30T15:43:07.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
yaakov
null
yaakov/test-distilbert-to-cola
3
null
transformers
22,667
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: test-distilbert-to-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5443893754588841 ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# test-distilbert-to-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5410
- Matthews Correlation: 0.5444

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5244 | 1.0 | 535 | 0.5352 | 0.4122 |
| 0.348 | 2.0 | 1070 | 0.4897 | 0.5169 |
| 0.2315 | 3.0 | 1605 | 0.5410 | 0.5444 |
| 0.177 | 4.0 | 2140 | 0.7533 | 0.5177 |
| 0.1338 | 5.0 | 2675 | 0.8129 | 0.5384 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
vishwasgautam/wav2vec2-base-libriSpeech-demo-colab
dc11a07be4bd81117d1a38be83da383b923eb485
2022-07-15T14:01:47.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
vishwasgautam
null
vishwasgautam/wav2vec2-base-libriSpeech-demo-colab
3
null
transformers
22,668
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-base-libriSpeech-demo-colab results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-base-libriSpeech-demo-colab

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4627
- Wer: 0.3174

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 4.2349 | 13.51 | 500 | 3.1154 | 1.0 |
| 1.5 | 27.03 | 1000 | 0.4627 | 0.3174 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
scottstots/roberta-base-prop-16-train-set
5d8984b5c7595a41ecf73220e9e778f668f6c1f2
2022-07-22T20:18:31.000Z
[ "pytorch", "tensorboard", "roberta", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
scottstots
null
scottstots/roberta-base-prop-16-train-set
3
null
transformers
22,669
--- license: mit tags: - generated_from_trainer model-index: - name: roberta-base-prop-16-train-set results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-base-prop-16-train-set

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0

### Training results

### Framework versions

- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
Evelyn18/distilbert-base-uncased-becas-0
aa2db5a8c5202fc200c8655e61aa53466f1741de
2022-07-01T18:34:35.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:becasv2", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Evelyn18
null
Evelyn18/distilbert-base-uncased-becas-0
3
null
transformers
22,670
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becas-0 results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-becas-0

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset.
It achieves the following results on the evaluation set:
- Loss: 5.2904

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 5 | 5.6445 |
| No log | 2.0 | 10 | 5.3875 |
| No log | 3.0 | 15 | 5.2904 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
vishwasgautam/HuBERT-base-libriSpeech-demo-colab
cdfdf01605384a58680850c7dfbc488790203d53
2022-07-02T05:24:24.000Z
[ "pytorch", "tensorboard", "hubert", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
vishwasgautam
null
vishwasgautam/HuBERT-base-libriSpeech-demo-colab
3
null
transformers
22,671
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: HuBERT-base-libriSpeech-demo-colab results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# HuBERT-base-libriSpeech-demo-colab

This model is a fine-tuned version of [facebook/hubert-large-ls960-ft](https://huggingface.co/facebook/hubert-large-ls960-ft) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1456
- Wer: 0.2443

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 7.6395 | 13.51 | 500 | 3.1933 | 0.9930 |
| 2.5994 | 27.03 | 1000 | 0.1456 | 0.2443 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
gianlab/swin-tiny-patch4-window7-224-finetuned-skin-cancer
b3b174a3d59e2b1129afe0409fc432795c3c5545
2022-07-02T08:35:24.000Z
[ "pytorch", "tensorboard", "swin", "image-classification", "dataset:imagefolder", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
image-classification
false
gianlab
null
gianlab/swin-tiny-patch4-window7-224-finetuned-skin-cancer
3
null
transformers
22,672
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-tiny-patch4-window7-224-finetuned-skin-cancer results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder args: default metrics: - name: Accuracy type: accuracy value: 0.7275449101796407 ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# swin-tiny-patch4-window7-224-finetuned-skin-cancer

This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7695
- Accuracy: 0.7275

## Model description

This model was created by importing the dataset of the photos of skin cancer into Google Colab from kaggle here: https://www.kaggle.com/datasets/kmader/skin-cancer-mnist-ham10000 .
I then used the image classification tutorial here: https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb
The possible classified diseases are: 'Actinic-keratoses', 'Basal-cell-carcinoma', 'Benign-keratosis-like-lesions', 'Dermatofibroma', 'Melanocytic-nevi', 'Melanoma', 'Vascular-lesions'.

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6911 | 0.99 | 70 | 0.7695 | 0.7275 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
erickfm/leafy-sweep-1
5f0d06e2c447245986cb50e0a502393dc564bbc1
2022-07-02T11:31:16.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/leafy-sweep-1
3
null
transformers
22,673
Entry not found
erickfm/clear-sweep-1
d77baf4a3f720b93808e94a3b29c29edef5841e9
2022-07-02T12:56:50.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/clear-sweep-1
3
null
transformers
22,674
Entry not found
erickfm/proud-sweep-1
0f94280b2326dad98f2a3722f76515e5c36f12b8
2022-07-03T01:32:22.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
erickfm
null
erickfm/proud-sweep-1
3
null
transformers
22,675
Entry not found
haddadalwi/distilbert-base-uncased-finetuned-squad
a802049c339c7a2ab10da9c7daaa34cf5d58c57e
2022-07-03T12:44:32.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:squad_v2", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
haddadalwi
null
haddadalwi/distilbert-base-uncased-finetuned-squad
3
null
transformers
22,676
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad_v2 model-index: - name: distilbert-base-uncased-finetuned-squad results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-squad

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad_v2 dataset.
It achieves the following results on the evaluation set:
- Loss: 5.5273

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 10 | 5.6821 |
| No log | 2.0 | 20 | 5.5273 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
mf99/autotrain-sum-200-random-1082438930
8687184a5b2b800ebc936b8b3681446b0784098e
2022-07-04T07:26:22.000Z
[ "pytorch", "bart", "text2text-generation", "en", "dataset:mf99/autotrain-data-sum-200-random", "transformers", "autotrain", "co2_eq_emissions", "autotrain_compatible" ]
text2text-generation
false
mf99
null
mf99/autotrain-sum-200-random-1082438930
3
null
transformers
22,677
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - mf99/autotrain-data-sum-200-random co2_eq_emissions: 4.994502035089263 ---

# Model Trained Using AutoTrain

- Problem type: Summarization
- Model ID: 1082438930
- CO2 Emissions (in grams): 4.994502035089263

## Validation Metrics

- Loss: 0.44043827056884766
- Rouge1: 78.4534
- Rouge2: 73.6511
- RougeL: 78.2595
- RougeLsum: 78.2561
- Gen Len: 17.2448

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/mf99/autotrain-sum-200-random-1082438930
```
fce-m72109/mascorpus-bert-classifier
26a4bfac1659b75c33cc643228c8c4c5c8b5f954
2022-07-03T22:36:48.000Z
[ "pytorch", "bert", "text-classification", "transformers", "license:mit" ]
text-classification
false
fce-m72109
null
fce-m72109/mascorpus-bert-classifier
3
null
transformers
22,678
--- license: mit ---
dexay/f_ner_rober
deb3f5156ea75ce9fba6a5fd4369ccc2712b5da6
2022-07-03T22:54:41.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
dexay
null
dexay/f_ner_rober
3
null
transformers
22,679
Entry not found
asahi417/lmqg-mbart-large-cc25-ruquad
5ddf3f3df125f35d32c2cd8b102b4f14cf291491
2022-07-04T04:41:48.000Z
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
asahi417
null
asahi417/lmqg-mbart-large-cc25-ruquad
3
null
transformers
22,680
Entry not found
theojolliffe/t5-small-fb
17f5acb44cf7c75daf52b7c19f7bb6f1cd16a2b1
2022-07-04T14:36:46.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
theojolliffe
null
theojolliffe/t5-small-fb
3
null
transformers
22,681
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: t5-small-fb results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# t5-small-fb

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
| No log | 1.0 | 237 | 1.5946 | 50.8607 | 34.41 | 46.7706 | 48.1561 | 18.2917 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
asahi417/lmqg-mbart-large-cc25-frquad
8dacbe150ca6166508b76fdda40fc10e07048775
2022-07-04T22:55:51.000Z
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
asahi417
null
asahi417/lmqg-mbart-large-cc25-frquad
3
null
transformers
22,682
Entry not found
Eleven/xlm-roberta-base-finetuned-panx-de-fr
6bcfd60ea6acdacc4142cb349de39157ab97cf3c
2022-07-05T15:59:42.000Z
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
Eleven
null
Eleven/xlm-roberta-base-finetuned-panx-de-fr
3
null
transformers
22,683
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de-fr results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de-fr

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1644
- F1: 0.8617

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2891 | 1.0 | 715 | 0.1780 | 0.8288 |
| 0.1471 | 2.0 | 1430 | 0.1627 | 0.8509 |
| 0.0947 | 3.0 | 2145 | 0.1644 | 0.8617 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
sileod/roberta-base-random
1c5602d5723c8aeb06f6362f2704a3e9bc000ba5
2022-07-05T18:05:49.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
sileod
null
sileod/roberta-base-random
3
null
transformers
22,684
Entry not found
Hamzaaa/wav2vec2-base-finetuned-Tess-finetuned-Tess
f554af3e8b200762120d767cfe78743bc02699ba
2022-07-07T09:52:41.000Z
[ "pytorch", "tensorboard", "wav2vec2", "audio-classification", "transformers" ]
audio-classification
false
Hamzaaa
null
Hamzaaa/wav2vec2-base-finetuned-Tess-finetuned-Tess
3
null
transformers
22,685
Entry not found
Hamzaaa/wav2vec2-base-finetuned-test-words
8be34d0a3f62316191f731403687d96b55d74f46
2022-07-05T20:16:49.000Z
[ "pytorch", "tensorboard", "wav2vec2", "audio-classification", "transformers" ]
audio-classification
false
Hamzaaa
null
Hamzaaa/wav2vec2-base-finetuned-test-words
3
null
transformers
22,686
Entry not found
tner/twitter-roberta-base-2019-90m-tweetner-2021
50e9c09d36d02824e389d2d0d79e5e804f955bca
2022-07-07T10:11:33.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
tner
null
tner/twitter-roberta-base-2019-90m-tweetner-2021
3
null
transformers
22,687
Entry not found
tner/twitter-roberta-base-dec2020-tweetner-2021
05af0fe879d8cf192137aa3c547a46aad9fd9083
2022-07-07T10:12:08.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
tner
null
tner/twitter-roberta-base-dec2020-tweetner-2021
3
null
transformers
22,688
Entry not found
tner/twitter-roberta-base-2019-90m-tweetner-2020-2021-concat
8d2df72dae413cedfaedcbe38bc030e21690dc9a
2022-07-07T10:22:24.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
tner
null
tner/twitter-roberta-base-2019-90m-tweetner-2020-2021-concat
3
null
transformers
22,689
Entry not found
tner/twitter-roberta-base-dec2020-tweetner-2020-2021-concat
aff67747652f0766ab7e575962faf295ca7a3e56
2022-07-07T10:23:30.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
tner
null
tner/twitter-roberta-base-dec2020-tweetner-2020-2021-concat
3
null
transformers
22,690
Entry not found
tner/twitter-roberta-base-2019-90m-tweetner-2020-2021-continuous
b40f923cd42ebc3ccb9a40ece69eda38912c27b4
2022-07-11T22:17:17.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
tner
null
tner/twitter-roberta-base-2019-90m-tweetner-2020-2021-continuous
3
null
transformers
22,691
Entry not found
tner/twitter-roberta-base-dec2020-tweetner-2020-2021-continuous
79b68b98307a198fe230f0c29436ef6f1cb8d44f
2022-07-11T23:54:27.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
tner
null
tner/twitter-roberta-base-dec2020-tweetner-2020-2021-continuous
3
null
transformers
22,692
Entry not found
jonatasgrosman/exp_w2v2t_en_vp-sv_s179
f672b01701ace6d5bbb3904896029453230a3f9b
2022-07-08T06:02:23.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "transformers", "license:apache-2.0" ]
automatic-speech-recognition
false
jonatasgrosman
null
jonatasgrosman/exp_w2v2t_en_vp-sv_s179
3
null
transformers
22,693
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 ---

# exp_w2v2t_en_vp-sv_s179

Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.

This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jourlin/wiki2json
0540686bbca1a5c707db05190c581069d0c3ebe2
2022-07-08T11:46:44.000Z
[ "pytorch", "t5", "text2text-generation", "dataset:opus_books", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
jourlin
null
jourlin/wiki2json
3
null
transformers
22,694
--- license: apache-2.0 tags: - generated_from_trainer datasets: - opus_books metrics: - bleu model-index: - name: wiki2json results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: opus_books type: opus_books args: en-fr metrics: - name: Bleu type: bleu value: 4.8968 ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wiki2json

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the opus_books dataset.
It achieves the following results on the evaluation set:
- Loss: 1.6848
- Bleu: 4.8968
- Gen Len: 17.6362

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| 1.9187 | 1.0 | 3178 | 1.6848 | 4.8968 | 17.6362 |

### Framework versions

- Transformers 4.18.0
- Pytorch 1.10.2+cu102
- Datasets 2.3.2
- Tokenizers 0.12.1
jonatasgrosman/exp_w2v2t_th_unispeech_s131
770beff8e799285a8d0eb5e7aa0908a9235480db
2022-07-08T10:45:46.000Z
[ "pytorch", "unispeech", "automatic-speech-recognition", "th", "dataset:mozilla-foundation/common_voice_7_0", "transformers", "license:apache-2.0" ]
automatic-speech-recognition
false
jonatasgrosman
null
jonatasgrosman/exp_w2v2t_th_unispeech_s131
3
null
transformers
22,695
--- language: - th license: apache-2.0 tags: - automatic-speech-recognition - th datasets: - mozilla-foundation/common_voice_7_0 ---

# exp_w2v2t_th_unispeech_s131

Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition on Thai using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.

This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_th_unispeech-ml_s256
8b0ad74d2fc498e48b510ffb9382903594d16cf4
2022-07-08T11:28:09.000Z
[ "pytorch", "unispeech", "automatic-speech-recognition", "th", "dataset:mozilla-foundation/common_voice_7_0", "transformers", "license:apache-2.0" ]
automatic-speech-recognition
false
jonatasgrosman
null
jonatasgrosman/exp_w2v2t_th_unispeech-ml_s256
3
null
transformers
22,696
--- language: - th license: apache-2.0 tags: - automatic-speech-recognition - th datasets: - mozilla-foundation/common_voice_7_0 ---

# exp_w2v2t_th_unispeech-ml_s256

Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition on Thai using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.

This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jk-gjom/autotrain-jk123-1105140277
7cf5f2adaf5b7bf7c44086fb810f1fdae9c75fa6
2022-07-08T13:22:03.000Z
[ "pytorch", "bert", "text-classification", "unk", "dataset:jk-gjom/autotrain-data-jk123", "transformers", "autotrain", "co2_eq_emissions" ]
text-classification
false
jk-gjom
null
jk-gjom/autotrain-jk123-1105140277
3
null
transformers
22,697
--- tags: autotrain language: unk widget: - text: "I love AutoTrain 🤗" datasets: - jk-gjom/autotrain-data-jk123 co2_eq_emissions: 0.1863935648335355 ---

# Model Trained Using AutoTrain

- Problem type: Multi-class Classification
- Model ID: 1105140277
- CO2 Emissions (in grams): 0.1863935648335355

## Validation Metrics

- Loss: 0.0680043175816536
- Accuracy: 0.9808
- Macro F1: 0.9808013970263609
- Micro F1: 0.9808
- Weighted F1: 0.9808013970263609
- Macro Precision: 0.9808207901614748
- Micro Precision: 0.9808
- Weighted Precision: 0.9808207901614749
- Macro Recall: 0.9808
- Micro Recall: 0.9808
- Weighted Recall: 0.9808

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/jk-gjom/autotrain-jk123-1105140277
```

Or Python API:

```
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("jk-gjom/autotrain-jk123-1105140277", use_auth_token=True)

tokenizer = AutoTokenizer.from_pretrained("jk-gjom/autotrain-jk123-1105140277", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")

outputs = model(**inputs)
```
nielsr/videomae-base-finetuned-kinetics
972cbe52fa2d39fc3aa7c4b9d7012a04bb7f094a
2022-07-08T15:01:41.000Z
[ "pytorch", "videomae", "transformers" ]
null
false
nielsr
null
nielsr/videomae-base-finetuned-kinetics
3
null
transformers
22,698
Entry not found
tner/bertweet-base-tweetner-2021
abca2f4e997423cbdf33b8307de950f9f1cf4e57
2022-07-09T21:17:16.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
tner
null
tner/bertweet-base-tweetner-2021
3
null
transformers
22,699
Entry not found