Dataset schema (one record per model; each record lists its fields in this order):

| Field | Type | Range / values |
|---|---|---|
| `modelId` | string | length 4–112 |
| `sha` | string | length 40 |
| `lastModified` | string | length 24 |
| `tags` | sequence | |
| `pipeline_tag` | string | 29 classes |
| `private` | bool | 1 class |
| `author` | string | length 2–38 |
| `config` | null | |
| `id` | string | length 4–112 |
| `downloads` | float64 | 0–36.8M |
| `likes` | float64 | 0–712 |
| `library_name` | string | 17 classes |
| `__index_level_0__` | int64 | 0–38.5k |
| `readme` | string | length 0–186k |
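The records below follow this schema, one field per line in the order given. As a quick orientation, here is a minimal sketch of how such records could be explored once parsed into a pandas DataFrame; the parsing step and the `df` name are assumptions, not part of this dump:

```python
import pandas as pd

# Two example rows transcribed from the dump below (only a subset of columns);
# in practice `df` would be built by parsing the full export.
df = pd.DataFrame([
    {"modelId": "doraemon1998/opus-mt-en-ro-finetuned-en-to-ro",
     "pipeline_tag": "text2text-generation", "library_name": "transformers"},
    {"modelId": "MarLac/wav2vec2-base-timit-demo-google-colab",
     "pipeline_tag": "automatic-speech-recognition", "library_name": "transformers"},
])

# How many rows each pipeline tag accounts for in this slice of the data.
print(df["pipeline_tag"].value_counts())
```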
doraemon1998/opus-mt-en-ro-finetuned-en-to-ro
a40c0c9761083fcc419f572600d1beaf236e4ac1
2022-07-15T00:44:03.000Z
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
doraemon1998
null
doraemon1998/opus-mt-en-ro-finetuned-en-to-ro
2
null
transformers
27,400
Entry not found
MarLac/wav2vec2-base-timit-demo-google-colab
329495bcd0653e49e5460ffbe695205765e3c159
2022-07-12T15:41:51.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
MarLac
null
MarLac/wav2vec2-base-timit-demo-google-colab
2
null
transformers
27,401
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-base-timit-demo-google-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-timit-demo-google-colab This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5816 - Wer: 0.3533 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 2.243 | 0.5 | 500 | 1.0798 | 0.7752 | | 0.834 | 1.01 | 1000 | 0.6206 | 0.5955 | | 0.5503 | 1.51 | 1500 | 0.5387 | 0.5155 | | 0.4548 | 2.01 | 2000 | 0.4660 | 0.4763 | | 0.3412 | 2.51 | 2500 | 0.8381 | 0.4836 | | 0.3128 | 3.02 | 3000 | 0.4818 | 0.4519 | | 0.2547 | 3.52 | 3500 | 0.4415 | 0.4230 | | 0.2529 | 4.02 | 4000 | 0.4624 | 0.4219 | | 0.2103 | 4.52 | 4500 | 0.4714 | 0.4096 | | 0.2102 | 5.03 | 5000 | 0.4968 | 0.4087 | | 0.1838 | 5.53 | 5500 | 0.4643 | 0.4131 | | 0.1721 | 6.03 | 6000 | 0.4676 | 0.3979 | | 0.1548 | 6.53 | 6500 | 0.4765 | 0.4085 | | 0.1595 | 7.04 | 7000 | 0.4797 | 0.3941 | | 0.1399 | 7.54 | 7500 | 0.4753 | 0.3902 | | 0.1368 | 8.04 | 8000 | 0.4697 | 0.3945 | | 0.1276 | 8.54 | 8500 | 0.5438 | 0.3869 | | 0.1255 | 9.05 | 9000 | 0.5660 | 0.3841 | | 0.1077 | 9.55 | 9500 | 0.4964 | 0.3947 | | 0.1197 | 10.05 | 10000 | 0.5349 | 0.3849 | | 0.1014 | 10.55 | 10500 | 0.5558 | 0.3883 | | 0.0949 | 11.06 | 11000 | 0.5673 | 0.3785 | | 0.0882 | 11.56 | 11500 | 0.5589 | 0.3955 | | 0.0906 | 12.06 | 12000 | 0.5752 | 0.4120 | | 0.1064 | 12.56 | 12500 | 0.5080 | 0.3727 | | 0.0854 | 13.07 | 13000 | 0.5398 | 0.3798 | | 0.0754 | 13.57 | 13500 | 0.5237 | 0.3816 | | 0.0791 | 14.07 | 14000 | 0.4967 | 0.3725 | | 0.0731 | 14.57 | 14500 | 0.5287 | 0.3744 | | 0.0719 | 15.08 | 15000 | 0.5633 | 0.3596 | | 0.062 | 15.58 | 15500 | 0.5399 | 0.3752 | | 0.0681 | 16.08 | 16000 | 0.5151 | 0.3759 | | 0.0559 | 16.58 | 16500 | 0.5564 | 0.3709 | | 0.0533 | 17.09 | 17000 | 0.5933 | 0.3743 | | 0.0563 | 17.59 | 17500 | 0.5381 | 0.3670 | | 0.0527 | 18.09 | 18000 | 0.5685 | 0.3731 | | 0.0492 | 18.59 | 18500 | 0.5728 | 0.3725 | | 0.0509 | 19.1 | 19000 | 0.6074 | 0.3807 | | 0.0436 | 19.6 | 19500 | 0.5762 | 0.3628 | | 0.0434 | 20.1 | 20000 | 0.6721 | 0.3729 | | 0.0416 | 20.6 | 20500 | 0.5842 | 0.3700 | | 0.0431 | 21.11 | 21000 | 0.5374 | 0.3607 | | 0.037 | 21.61 | 21500 | 0.5556 | 0.3667 | | 0.036 | 22.11 | 22000 | 0.5608 | 0.3592 | | 0.04 | 22.61 | 22500 | 0.5272 | 0.3637 | | 0.047 | 23.12 | 23000 | 0.5234 | 0.3625 | | 0.0506 | 23.62 | 23500 | 0.5427 | 0.3629 | | 0.0418 | 24.12 | 24000 | 0.5590 | 0.3626 | | 0.037 | 24.62 | 24500 | 0.5615 | 0.3555 | | 0.0429 | 25.13 | 25000 | 0.5806 | 0.3616 | | 0.045 | 25.63 | 25500 | 0.5777 | 0.3639 | | 0.0283 | 26.13 | 26000 | 0.5987 | 0.3617 | | 0.0253 | 26.63 | 26500 | 0.5671 | 0.3551 | | 0.032 | 27.14 | 27000 | 0.5464 | 0.3582 | | 0.0321 | 27.64 | 27500 | 0.5634 | 0.3573 | | 0.0274 | 28.14 | 28000 | 0.5513 | 0.3575 | | 0.0245 | 28.64 | 28500 | 0.5745 | 0.3537 | | 0.0251 | 29.15 | 29000 | 0.5759 | 0.3547 | | 0.0222 | 29.65 | 29500 | 0.5816 | 0.3533 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 1.18.3 - Tokenizers 0.12.1
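The card above stops short of a usage example; a minimal sketch of transcribing audio with this checkpoint via the standard `transformers` ASR pipeline might look as follows (the audio file name is a placeholder):

```python
from transformers import pipeline

# Load the fine-tuned checkpoint from the Hub.
asr = pipeline("automatic-speech-recognition",
               model="MarLac/wav2vec2-base-timit-demo-google-colab")

# Transcribe a 16 kHz mono audio file; the path is a placeholder.
print(asr("speech_sample.wav")["text"])
```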
rajat99/Fine_Tuning_XLSR_300M_testing_model
8d719b0d276783c3c1c98d8aa4e33eecde2d4072
2022-07-12T12:00:41.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
rajat99
null
rajat99/Fine_Tuning_XLSR_300M_testing_model
2
null
transformers
27,402
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: Fine_Tuning_XLSR_300M_testing_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Fine_Tuning_XLSR_300M_testing_model This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.2861 - Wer: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:---:| | 5.5178 | 23.53 | 400 | 3.2861 | 1.0 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
ghadeermobasher/Modified_BiomedNLP-PubMedBERT-base-uncased-abstract_BioRED-Dis-512-5-30
a7757c3fbf34a0e17eab4c91cd3daee884e632a8
2022-07-13T11:19:11.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified_BiomedNLP-PubMedBERT-base-uncased-abstract_BioRED-Dis-512-5-30
2
null
transformers
27,403
ghadeermobasher/Original-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED_Dis-320-8-10
e9eed7ef97bbfb12969d3571dda92068909069b8
2022-07-12T14:44:53.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED_Dis-320-8-10
2
null
transformers
27,404
Entry not found
ghadeermobasher/Modified-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-Dis-320-8-10
3766b27fab2f054430c12fede4242769f66f9464
2022-07-12T14:50:58.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-Dis-320-8-10
2
null
transformers
27,405
Entry not found
andreaschandra/xlm-roberta-base-finetuned-panx-de-fr
9cde38ac5b1a310054ea4339df05f6627dc876ce
2022-07-12T15:05:50.000Z
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
andreaschandra
null
andreaschandra/xlm-roberta-base-finetuned-panx-de-fr
2
null
transformers
27,406
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de-fr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de-fr This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1619 - F1: 0.8599 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2851 | 1.0 | 715 | 0.1792 | 0.8239 | | 0.149 | 2.0 | 1430 | 0.1675 | 0.8401 | | 0.0955 | 3.0 | 2145 | 0.1619 | 0.8599 | ### Framework versions - Transformers 4.19.4 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
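As with the other auto-generated cards in this dump, no usage snippet is included; a minimal sketch of running this checkpoint through the token-classification pipeline (the input sentence is invented):

```python
from transformers import pipeline

# aggregation_strategy="simple" merges word pieces into whole entity spans.
ner = pipeline("token-classification",
               model="andreaschandra/xlm-roberta-base-finetuned-panx-de-fr",
               aggregation_strategy="simple")

print(ner("Angela Merkel traf Emmanuel Macron in Paris."))
```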
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-CD-320-8-10
05c8d91042e35d598cf7bc0c409d11bd432145b6
2022-07-12T15:23:48.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-CD-320-8-10
2
null
transformers
27,407
Entry not found
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-CD-320-8-10
d9ae3a9edc03b897918254e7ba72cc21ac3afb40
2022-07-12T15:37:22.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-CD-320-8-10
2
null
transformers
27,408
Entry not found
andreaschandra/xlm-roberta-base-finetuned-panx-fr
b1f381b7abf4e16732067d5f599c680daac9b91f
2022-07-12T15:30:15.000Z
[ "pytorch", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
andreaschandra
null
andreaschandra/xlm-roberta-base-finetuned-panx-fr
2
null
transformers
27,409
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-fr results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.fr metrics: - name: F1 type: f1 value: 0.9275221167113059 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-fr This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1059 - F1: 0.9275 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.5416 | 1.0 | 191 | 0.2322 | 0.8378 | | 0.2614 | 2.0 | 382 | 0.1544 | 0.8866 | | 0.1758 | 3.0 | 573 | 0.1059 | 0.9275 | ### Framework versions - Transformers 4.19.4 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
huggingtweets/masonhaggerty
eabfd05506e6af632c90cd12270323ed4d7042ea
2022-07-12T17:17:06.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/masonhaggerty
2
null
transformers
27,410
--- language: en thumbnail: http://www.huggingtweets.com/masonhaggerty/1657646221015/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1410026132121047041/LiYev7vQ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Mason Haggerty</div> <div style="text-align: center; font-size: 14px;">@masonhaggerty</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Mason Haggerty. | Data | Mason Haggerty | | --- | --- | | Tweets downloaded | 785 | | Retweets | 71 | | Short tweets | 82 | | Tweets kept | 632 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/jpav9nmg/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @masonhaggerty's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/bs6k2tzz) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/bs6k2tzz/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/masonhaggerty') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Li-Tang/rare-puppers
0ace0043db173cf1394d5e8faa2739f3e09ddbfd
2022-07-12T16:57:55.000Z
[ "pytorch", "tensorboard", "vit", "image-classification", "transformers", "huggingpics", "model-index" ]
image-classification
false
Li-Tang
null
Li-Tang/rare-puppers
2
null
transformers
27,411
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9701492786407471 --- # rare-puppers Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
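For completeness, a minimal sketch of classifying an image with this HuggingPics checkpoint via the image-classification pipeline (the image path is a placeholder):

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="Li-Tang/rare-puppers")

# Prints the top predicted classes with their scores.
for pred in classifier("corgi_photo.jpg"):
    print(pred["label"], round(pred["score"], 4))
```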
huggingtweets/ydouright
5251cbea6fa152b18a1adb0f3ceff5b12f02bb08
2022-07-12T20:15:17.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/ydouright
2
null
transformers
27,412
--- language: en thumbnail: http://www.huggingtweets.com/ydouright/1657656913047/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1506510453286924293/NXf3sNMH_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">ethans.data</div> <div style="text-align: center; font-size: 14px;">@ydouright</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from ethans.data. | Data | ethans.data | | --- | --- | | Tweets downloaded | 3245 | | Retweets | 119 | | Short tweets | 572 | | Tweets kept | 2554 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1vfnsep8/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @ydouright's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3f5l1flk) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3f5l1flk/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/ydouright') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
huggingtweets/dylanfromsf
a72b30ea80ee8a48647f5a7ffff8b0da195968c5
2022-07-12T20:29:49.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/dylanfromsf
2
null
transformers
27,413
--- language: en thumbnail: http://www.huggingtweets.com/dylanfromsf/1657657784578/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1384643526772678657/O7Sz_ZxW_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">dylan</div> <div style="text-align: center; font-size: 14px;">@dylanfromsf</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from dylan. | Data | dylan | | --- | --- | | Tweets downloaded | 1288 | | Retweets | 116 | | Short tweets | 420 | | Tweets kept | 752 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2526mmm1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @dylanfromsf's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2ds3020w) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2ds3020w/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/dylanfromsf') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
rvignav/biobert-finetuned-prior-rmv
c0911ad2552712b8c867361d634bac7867d5aa72
2022-07-12T23:17:51.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
rvignav
null
rvignav/biobert-finetuned-prior-rmv
2
null
transformers
27,414
Entry not found
Team-PIXEL/pixel-base-finetuned-pos-ud-japanese-gsd
f2d45d9f2798b0f6ab6d693d89fceb08dacec2b1
2022-07-13T01:14:04.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-pos-ud-japanese-gsd
2
null
transformers
27,415
Entry not found
Team-PIXEL/pixel-base-finetuned-pos-ud-tamil-ttb
02852385055f84f71b7bc1e2a127334568f81097
2022-07-13T01:26:57.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-pos-ud-tamil-ttb
2
null
transformers
27,416
Entry not found
Team-PIXEL/pixel-base-finetuned-parsing-ud-arabic-padt
9b90ea609a05f4ec5405a2031525b674faf3b4ef
2022-07-13T01:45:53.000Z
[ "pytorch", "pixel", "transformers" ]
null
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-parsing-ud-arabic-padt
2
null
transformers
27,417
Entry not found
dafraile/Clini-dialog-sum-BART
b7b7fc5f6f00040b42ffc3ae39ea790a85d66f62
2022-07-19T05:12:30.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
dafraile
null
dafraile/Clini-dialog-sum-BART
2
null
transformers
27,418
--- license: mit tags: - generated_from_trainer metrics: - rouge model-index: - name: tst-summarization results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tst-summarization This model is a fine-tuned version of [philschmid/bart-large-cnn-samsum](https://huggingface.co/philschmid/bart-large-cnn-samsum) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.9975 - Rouge1: 56.239 - Rouge2: 28.9873 - Rougel: 38.5242 - Rougelsum: 53.7902 - Gen Len: 105.2973 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results ### Framework versions - Transformers 4.18.0.dev0 - Pytorch 1.10.0 - Datasets 1.18.4 - Tokenizers 0.11.6
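The card reports ROUGE scores but gives no usage snippet; a minimal sketch of summarizing a clinical dialogue with this checkpoint (the dialogue text is invented):

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="dafraile/Clini-dialog-sum-BART")

dialogue = (
    "Doctor: How have you been since the last visit? "
    "Patient: The cough has improved, but I still get short of breath at night."
)
print(summarizer(dialogue, max_length=128, min_length=16)[0]["summary_text"])
```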
huggingtweets/burdeevt
bee2119429e4eee2668ba0dd5978867e8d6a50eb
2022-07-13T04:15:34.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/burdeevt
2
null
transformers
27,419
--- language: en thumbnail: http://www.huggingtweets.com/burdeevt/1657685656540/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1542316332972228608/Hs2WAuIA_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Burdee 🐣💖</div> <div style="text-align: center; font-size: 14px;">@burdeevt</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Burdee 🐣💖. | Data | Burdee 🐣💖 | | --- | --- | | Tweets downloaded | 2715 | | Retweets | 1903 | | Short tweets | 252 | | Tweets kept | 560 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/37eoz4i5/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @burdeevt's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2t35juo3) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2t35juo3/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/burdeevt') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
nawta/wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_2
9a8addc7b0adafb39fa3d3b9321adcbdd1e6ec4c
2022-07-13T10:11:43.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "model-index" ]
automatic-speech-recognition
false
nawta
null
nawta/wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_2
2
null
transformers
27,420
--- tags: - generated_from_trainer model-index: - name: wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_2 This model is a fine-tuned version of [/root/workspace/wav2vec2-pretrained_with_ESC50_10000epochs_32batch_2022-07-09_22-16-46/pytorch_model.bin](https://huggingface.co//root/workspace/wav2vec2-pretrained_with_ESC50_10000epochs_32batch_2022-07-09_22-16-46/pytorch_model.bin) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.6235 - Cer: 0.8973 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Cer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 5.0097 | 23.81 | 500 | 2.6235 | 0.8973 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.9.1+cu111 - Datasets 1.13.3 - Tokenizers 0.10.3
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-Dis-320-8-10
ea644d841d3eb7fd9a424924f4216ba3161568aa
2022-07-13T11:59:29.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-Dis-320-8-10
2
null
transformers
27,421
Entry not found
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-CD-320-8-10
8d34ea65a212b2510a1312cd2deb80d69deffd33
2022-07-13T12:06:48.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-CD-320-8-10
2
null
transformers
27,422
Entry not found
nawta/wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_3
a5cceff2c986538d1916b64203154a1de52d2115
2022-07-13T14:03:36.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "model-index" ]
automatic-speech-recognition
false
nawta
null
nawta/wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_3
2
null
transformers
27,423
--- tags: - generated_from_trainer model-index: - name: wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_3 This model is a fine-tuned version of [/root/workspace/wav2vec2-pretrained_with_ESC50_10000epochs_32batch_2022-07-09_22-16-46/pytorch_model.bin](https://huggingface.co//root/workspace/wav2vec2-pretrained_with_ESC50_10000epochs_32batch_2022-07-09_22-16-46/pytorch_model.bin) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.5350 - Cer: 1.2730 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Cer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 4.4243 | 4.67 | 500 | 2.6901 | 1.1259 | | 2.4282 | 9.35 | 1000 | 2.7495 | 1.1563 | | 2.3377 | 14.02 | 1500 | 2.2475 | 0.9617 | | 2.2434 | 18.69 | 2000 | 2.2765 | 1.1908 | | 2.2731 | 23.36 | 2500 | 2.2574 | 1.1669 | | 2.3436 | 28.04 | 3000 | 2.5350 | 1.2730 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.9.1+cu111 - Datasets 1.13.3 - Tokenizers 0.10.3
ghadeermobasher/Modified-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-CD-320-8-10
32773e25fd018e6a0232483d9ddaa8ab66e53b3a
2022-07-13T12:21:34.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-CD-320-8-10
2
null
transformers
27,424
Entry not found
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-CD-128-32-30
98cf6e3d0c07fe2398ac679070f2184a41b589b0
2022-07-13T12:27:12.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-CD-128-32-30
2
null
transformers
27,425
Entry not found
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-CD-256-16-5
192306ec5710678a33d08972470fd69fee7fe1ab
2022-07-13T12:12:19.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-CD-256-16-5
2
null
transformers
27,426
Entry not found
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-CD-384-5-20
5ae3e1e0c4d523c7642b9920eb9460444b1a7d77
2022-07-13T13:16:29.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-CD-384-5-20
2
null
transformers
27,427
Entry not found
jordyvl/udpos28-sm-all-POS
6d40f37e3bde41367174b6b7b69fd5ae0056c902
2022-07-13T12:23:52.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "dataset:udpos28", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
jordyvl
null
jordyvl/udpos28-sm-all-POS
2
null
transformers
27,428
--- license: apache-2.0 tags: - generated_from_trainer datasets: - udpos28 metrics: - precision - recall - f1 - accuracy model-index: - name: udpos28-sm-all-POS results: - task: name: Token Classification type: token-classification dataset: name: udpos28 type: udpos28 args: en metrics: - name: Precision type: precision value: 0.9586517032792105 - name: Recall type: recall value: 0.9588997472284696 - name: F1 type: f1 value: 0.9587757092110369 - name: Accuracy type: accuracy value: 0.964820639556654 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # udpos28-sm-all-POS This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the udpos28 dataset. It achieves the following results on the evaluation set: - Loss: 0.1479 - Precision: 0.9587 - Recall: 0.9589 - F1: 0.9588 - Accuracy: 0.9648 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.1261 | 1.0 | 4978 | 0.1358 | 0.9513 | 0.9510 | 0.9512 | 0.9581 | | 0.0788 | 2.0 | 9956 | 0.1326 | 0.9578 | 0.9578 | 0.9578 | 0.9642 | | 0.0424 | 3.0 | 14934 | 0.1479 | 0.9587 | 0.9589 | 0.9588 | 0.9648 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.2.2 - Tokenizers 0.12.1
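A minimal sketch of POS-tagging a sentence with this checkpoint, loading the model and tokenizer explicitly (the sentence is invented):

```python
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

name = "jordyvl/udpos28-sm-all-POS"
tagger = pipeline("token-classification",
                  model=AutoModelForTokenClassification.from_pretrained(name),
                  tokenizer=AutoTokenizer.from_pretrained(name))

# With the default aggregation, each sub-token carries its predicted POS tag.
for token in tagger("The quick brown fox jumps over the lazy dog."):
    print(token["word"], token["entity"])
```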
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-CD-128-32-30
71ecbd6af8c7ac1a343e37cc168c0691609c4afb
2022-07-13T12:42:18.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-CD-128-32-30
2
null
transformers
27,429
Entry not found
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-CD-256-16-5
ac9ddca8a471304a21c11027a6b5962c63d94161
2022-07-13T12:26:52.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-CD-256-16-5
2
null
transformers
27,430
Entry not found
ghadeermobasher/Original-biobert-v1.1-BioRED-CD-384-5-20
f5ae27633a21b7fd99c95d07d2c61e505364501f
2022-07-13T13:31:32.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-biobert-v1.1-BioRED-CD-384-5-20
2
null
transformers
27,431
Entry not found
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-CD-384-5-20
ab60c0de23806b3d38de9b32fb40cd5eb075dff3
2022-07-13T13:33:15.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-CD-384-5-20
2
null
transformers
27,432
Entry not found
ghadeermobasher/Modified-biobert-v1.1-BioRED-CD-384-5-20
e717e5b5851461eef677c1046e74ea2e83863f18
2022-07-13T13:49:04.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-biobert-v1.1-BioRED-CD-384-5-20
2
null
transformers
27,433
Entry not found
ghadeermobasher/Original-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-256-16-5
e71cc4f0b67e28ef72fa3cc3a382c56237949e68
2022-07-13T13:11:50.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-256-16-5
2
null
transformers
27,434
Entry not found
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-256-16-5
ba0fa12b62e4f97d6a223375bc766e06e2375ffb
2022-07-13T13:12:08.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-256-16-5
2
null
transformers
27,435
Entry not found
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-128-32-30
d2bf6765b7492c4e2499861ce962a70e854f8fe5
2022-07-13T13:08:49.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-128-32-30
2
null
transformers
27,436
Entry not found
ghadeermobasher/Original-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-128-32-30
f1a1a8b011f9ce97504c1c1a3539f4d0ad716eb1
2022-07-13T13:31:54.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-128-32-30
2
null
transformers
27,437
Entry not found
ghadeermobasher/Original-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-384-8-10
3898a063feee67480a7f50c8b026fc793a1b5961
2022-07-13T13:40:02.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-384-8-10
2
null
transformers
27,438
Entry not found
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-384-8-10
ebe97c36431ba3fcee27f2696c8155f40dadd1ae
2022-07-13T13:40:44.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-CD-384-8-10
2
null
transformers
27,439
Entry not found
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED_Dis-320-8-10
f792738046856699ca85b9e0a6871b1638bc28a4
2022-07-13T13:55:33.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED_Dis-320-8-10
2
null
transformers
27,440
Entry not found
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED_Dis-256-16-5
812b5021ab0b683c4cb0def0e779aec81dddd637
2022-07-13T13:38:03.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED_Dis-256-16-5
2
null
transformers
27,441
Entry not found
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-Dis-256-16-5
cd7a2a65a6acb6b7191379ff9db82f5302130316
2022-07-13T13:38:11.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-Dis-256-16-5
2
null
transformers
27,442
Entry not found
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED_Dis-384-8-10
2afc34c8f047cd9d11e0e62d489239ef31d5b603
2022-07-13T14:03:38.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED_Dis-384-8-10
2
null
transformers
27,443
Entry not found
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-Dis-384-8-10
88dfa6d69d519056c502102505d6f663550f99e5
2022-07-13T14:04:21.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED-Dis-384-8-10
2
null
transformers
27,444
Entry not found
KeLiu/QETRA_PHP
0058cd958b387eaea66771b09189a5fdfb4c9c0c
2022-07-13T13:38:52.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
KeLiu
null
KeLiu/QETRA_PHP
2
null
transformers
27,445
Entry not found
nawta/wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_5
caa30638c48325cc3825facdaa7b5c46d60958b3
2022-07-13T14:43:29.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "model-index" ]
automatic-speech-recognition
false
nawta
null
nawta/wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_5
2
null
transformers
27,446
--- tags: - generated_from_trainer model-index: - name: wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-onomatopoeia-finetune_smalldata_ESC50pretrained_5 This model is a fine-tuned version of [/root/workspace/wav2vec2-pretrained_with_ESC50_10000epochs_32batch_2022-07-09_22-16-46/pytorch_model.bin](https://huggingface.co//root/workspace/wav2vec2-pretrained_with_ESC50_10000epochs_32batch_2022-07-09_22-16-46/pytorch_model.bin) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.9.1+cu111 - Datasets 1.13.3 - Tokenizers 0.10.3
Team-PIXEL/pixel-base-finetuned-parsing-ud-english-ewt
479ff0c69592d092b18f4a46f4b1b38c51a89c55
2022-07-13T15:01:13.000Z
[ "pytorch", "pixel", "transformers" ]
null
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-parsing-ud-english-ewt
2
null
transformers
27,447
Entry not found
Team-PIXEL/pixel-base-finetuned-parsing-ud-japanese-gsd
0465819e183aa17c95e6220ceacba18f0ffdd58d
2022-07-13T15:16:46.000Z
[ "pytorch", "pixel", "transformers" ]
null
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-parsing-ud-japanese-gsd
2
null
transformers
27,448
Entry not found
Team-PIXEL/pixel-base-finetuned-parsing-ud-korean-gsd
02ab5042071b0daae583853c291748c7dc86a7eb
2022-07-13T15:24:00.000Z
[ "pytorch", "pixel", "transformers" ]
null
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-parsing-ud-korean-gsd
2
null
transformers
27,449
Entry not found
ghadeermobasher/Original-BiomedNLP-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED_Dis-320-8-10
2d9616284397c002115158871fec9e3777f5988d
2022-07-13T17:05:33.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-BiomedNLP-bluebert_pubmed_uncased_L-12_H-768_A-12-BioRED_Dis-320-8-10
2
null
transformers
27,450
Entry not found
Bistolero/mt5_32b_DP_1ep
49a7edcbeed44ddc87298921a97456a02eea58a2
2022-07-13T17:06:36.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Bistolero
null
Bistolero/mt5_32b_DP_1ep
2
null
transformers
27,451
Entry not found
nloc2578/2
4cff7efd32696f70e35a5b9ce67d00e3c767229f
2022-07-13T20:39:00.000Z
[ "pytorch", "tensorboard", "pegasus", "text2text-generation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
nloc2578
null
nloc2578/2
2
null
transformers
27,452
--- tags: - generated_from_trainer model-index: - name: '2' results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 2 This model is a fine-tuned version of [google/pegasus-xsum](https://huggingface.co/google/pegasus-xsum) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: nan ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0015 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7584 | 0.33 | 1500 | 2.7788 | | 3.3283 | 0.67 | 3000 | 3.1709 | | 3.365 | 1.0 | 4500 | 3.1651 | | 3.1237 | 1.34 | 6000 | nan | | 0.0 | 1.67 | 7500 | nan | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Tokenizers 0.12.1
PontifexMaximus/opus-mt-ur-en-finetuned-ur-to-en
2c56d158c7ca3790132de601e888204a7c95ab77
2022-07-14T05:13:12.000Z
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PontifexMaximus
null
PontifexMaximus/opus-mt-ur-en-finetuned-ur-to-en
2
null
transformers
27,453
Entry not found
liyijing024/swin-base-patch4-window7-224-in22k-finetuned
89a4847ce7065b6fa8bbffba2b877b7384a3b41d
2022-07-14T06:53:34.000Z
[ "pytorch", "tensorboard", "swin", "image-classification", "dataset:imagefolder", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
image-classification
false
liyijing024
null
liyijing024/swin-base-patch4-window7-224-in22k-finetuned
2
null
transformers
27,454
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-base-patch4-window7-224-in22k-finetuned results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder args: default metrics: - name: Accuracy type: accuracy value: 0.9993279702725674 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-base-patch4-window7-224-in22k-finetuned This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224-in22k](https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0021 - Accuracy: 0.9993 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0253 | 1.0 | 889 | 0.0060 | 0.9980 | | 0.0134 | 2.0 | 1778 | 0.0031 | 0.9989 | | 0.0118 | 3.0 | 2667 | 0.0021 | 0.9993 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.8.0+cu111 - Datasets 2.3.3.dev0 - Tokenizers 0.12.1
zeehen/dummy-model
a6a004586025572a2280fffc397e25258d1f5589
2022-07-14T05:45:23.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
zeehen
null
zeehen/dummy-model
2
null
transformers
27,455
Entry not found
rajat99/Fine_Tuning_XLSR_300M_testing_4_model
8cbfd1c0d69bf6a6b5d0847c4e5adb3e8eecb082
2022-07-14T06:15:09.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
rajat99
null
rajat99/Fine_Tuning_XLSR_300M_testing_4_model
2
null
transformers
27,456
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: Fine_Tuning_XLSR_300M_testing_4_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Fine_Tuning_XLSR_300M_testing_4_model This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.1 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
NinaXiao/distilroberta-base-wiki-mark
be725281106c5f1b0250aeae98c08e4ad4617f66
2022-07-14T09:05:03.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
NinaXiao
null
NinaXiao/distilroberta-base-wiki-mark
2
null
transformers
27,457
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilroberta-base-wiki-mark results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilroberta-base-wiki-mark This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0062 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.2841 | 1.0 | 1265 | 2.0553 | | 2.1536 | 2.0 | 2530 | 1.9840 | | 2.1067 | 3.0 | 3795 | 1.9731 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
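A minimal sketch of querying this fill-mask checkpoint; RoBERTa-style models use `<mask>` as the mask token (the prompt is invented):

```python
from transformers import pipeline

fill = pipeline("fill-mask", model="NinaXiao/distilroberta-base-wiki-mark")

# Prints the top candidate fillers with their scores.
for pred in fill("The capital of France is <mask>."):
    print(pred["token_str"], round(pred["score"], 4))
```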
big-kek/large-korzh
6d1de491c2ea2941caab065b9e8a64105ad69d4f
2022-07-14T17:04:16.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
big-kek
null
big-kek/large-korzh
2
null
transformers
27,458
Entry not found
JoonJoon/t5-small-finetuned-xsum
f68e2a51f86812a5e7931f297c2bd6b4942d7495
2022-07-14T19:28:50.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
JoonJoon
null
JoonJoon/t5-small-finetuned-xsum
2
null
transformers
27,459
Entry not found
natnova/xlm-roberta-base-finetuned-panx-de
60f372135d71a16c09f7df5e80361df1455d2bd5
2022-07-14T13:29:37.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
natnova
null
natnova/xlm-roberta-base-finetuned-panx-de
2
null
transformers
27,460
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: xtreme
      type: xtreme
      args: PAN-X.de
    metrics:
    - name: F1
      type: f1
      value: 0.8648740833380706
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1365
- F1: 0.8649

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1     |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2553        | 1.0   | 525  | 0.1575          | 0.8279 |
| 0.1284        | 2.0   | 1050 | 0.1386          | 0.8463 |
| 0.0813        | 3.0   | 1575 | 0.1365          | 0.8649 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.12.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
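A minimal NER sketch, assuming the checkpoint is available under the repo id above; the example sentence is illustrative only.

```python
from transformers import pipeline

# Token classification (NER) with the PAN-X.de fine-tuned checkpoint;
# aggregation_strategy merges word pieces into whole entity spans.
ner = pipeline("token-classification",
               model="natnova/xlm-roberta-base-finetuned-panx-de",
               aggregation_strategy="simple")

print(ner("Jeff Dean arbeitet bei Google in Zürich."))
```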
JoonJoon/swin-tiny-patch4-window7-224-finetuned-eurosat
2989fa339e3412ea251adb650defd9c38dfe67e7
2022-07-14T14:58:59.000Z
[ "pytorch", "swin", "image-classification", "dataset:imagefolder", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
image-classification
false
JoonJoon
null
JoonJoon/swin-tiny-patch4-window7-224-finetuned-eurosat
2
null
transformers
27,461
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imagefolder
metrics:
- accuracy
model-index:
- name: swin-tiny-patch4-window7-224-finetuned-eurosat
  results:
  - task:
      name: Image Classification
      type: image-classification
    dataset:
      name: imagefolder
      type: imagefolder
      args: default
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9725925925925926
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# swin-tiny-patch4-window7-224-finetuned-eurosat

This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0814
- Accuracy: 0.9726

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 96
- eval_batch_size: 96
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 384
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.3216        | 0.99  | 63   | 0.1349          | 0.9589   |
| 0.2           | 1.99  | 126  | 0.0873          | 0.9704   |
| 0.1664        | 2.99  | 189  | 0.0814          | 0.9726   |

### Framework versions

- Transformers 4.17.0
- Pytorch 1.10.2+cu102
- Datasets 2.3.2
- Tokenizers 0.11.6
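A sketch using the lower-level classes rather than the pipeline helper, assuming the repo id above is public; the image path is a hypothetical EuroSAT-style tile.

```python
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

repo = "JoonJoon/swin-tiny-patch4-window7-224-finetuned-eurosat"
extractor = AutoFeatureExtractor.from_pretrained(repo)
model = AutoModelForImageClassification.from_pretrained(repo)

image = Image.open("satellite_tile.png")  # hypothetical input image
inputs = extractor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Map the highest-scoring logit back to its class name.
print(model.config.id2label[logits.argmax(-1).item()])
```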
ericklerouge123/xlm-roberta-base-finetuned-panx-de-fr
0f4cde7007f5eba0ec445b8359a4e07f9fb12445
2022-07-14T16:17:52.000Z
[ "pytorch", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
ericklerouge123
null
ericklerouge123/xlm-roberta-base-finetuned-panx-de-fr
2
null
transformers
27,462
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de-fr
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: xtreme
      type: xtreme
      args: PAN-X.en
    metrics:
    - name: F1
      type: f1
      value: 0.6886160714285715
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de-fr

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4043
- F1: 0.6886

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1     |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 1.1347        | 1.0   | 50   | 0.5771          | 0.4880 |
| 0.5066        | 2.0   | 100  | 0.4209          | 0.6582 |
| 0.3631        | 3.0   | 150  | 0.4043          | 0.6886 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.12.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
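A minimal sketch, assuming the checkpoint covers both German and French as its name suggests; the sentence is illustrative.

```python
from transformers import pipeline

# NER with the de-fr fine-tuned checkpoint; entity spans are aggregated.
ner = pipeline("token-classification",
               model="ericklerouge123/xlm-roberta-base-finetuned-panx-de-fr",
               aggregation_strategy="simple")

print(ner("Emmanuel Macron habite à Paris."))
```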
Team-PIXEL/pixel-base-finetuned-jaquad
07d9e058954644c7f8a31823a272da86bc8d578b
2022-07-14T16:07:40.000Z
[ "pytorch", "pixel", "question-answering", "dataset:SkelterLabsInc/JaQuAD", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
question-answering
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-jaquad
2
null
transformers
27,463
---
tags:
- generated_from_trainer
datasets:
- SkelterLabsInc/JaQuAD
model-index:
- name: pixel-base-finetuned-jaquad
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# pixel-base-finetuned-jaquad

This model is a fine-tuned version of [Team-PIXEL/pixel-base](https://huggingface.co/Team-PIXEL/pixel-base) on the SkelterLabsInc/JaQuAD dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 45
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- training_steps: 20000
- mixed_precision_training: Apex, opt level O1

### Training results

### Framework versions

- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.12.1
ericklerouge123/xlm-roberta-base-finetuned-panx-all
a1be08195605d07bb714a020e4797286ab9e3add
2022-07-14T16:46:09.000Z
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
ericklerouge123
null
ericklerouge123/xlm-roberta-base-finetuned-panx-all
2
null
transformers
27,464
---
license: mit
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-all
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-all

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1348
- F1: 0.8844

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1     |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.3055        | 1.0   | 835  | 0.1755          | 0.8272 |
| 0.1561        | 2.0   | 1670 | 0.1441          | 0.8727 |
| 0.1016        | 3.0   | 2505 | 0.1348          | 0.8844 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.12.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
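A lower-level sketch without the pipeline helper, assuming the repo id above is public; per-token labels are read straight from the logits.

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

repo = "ericklerouge123/xlm-roberta-base-finetuned-panx-all"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForTokenClassification.from_pretrained(repo)

inputs = tokenizer("Angela Merkel besuchte Paris.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pair each subword token with its predicted entity label.
labels = [model.config.id2label[i] for i in logits.argmax(-1)[0].tolist()]
print(list(zip(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]), labels)))
```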
JoonJoon/gpt2-wikitext2
211c4a191fce333b1abc3c96f138c87083160328
2022-07-14T20:23:03.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
JoonJoon
null
JoonJoon/gpt2-wikitext2
2
null
transformers
27,465
Entry not found
JoonJoon/bert-base-cased-wikitext2
62af6ecc6cb27a918ff57c3261ed0ae7a295c2c8
2022-07-14T20:57:50.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
fill-mask
false
JoonJoon
null
JoonJoon/bert-base-cased-wikitext2
2
null
transformers
27,466
---
tags:
- generated_from_trainer
model-index:
- name: bert-base-cased-wikitext2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-base-cased-wikitext2

This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 6.9846

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 7.7422        | 1.0   | 782  | 7.1373          |
| 7.0302        | 2.0   | 1564 | 6.9972          |
| 6.9788        | 3.0   | 2346 | 7.0087          |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.12.0+cu102
- Datasets 1.14.0
- Tokenizers 0.10.3
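A minimal sketch for masked-token prediction, assuming the repo id above is public; given the ~7.0 evaluation loss, predictions are likely weak.

```python
from transformers import pipeline

# BERT-style models use [MASK] as the mask token.
fill = pipeline("fill-mask", model="JoonJoon/bert-base-cased-wikitext2")
print(fill("The quick brown [MASK] jumps over the lazy dog."))
```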
johanna-k/small-pw-test
c4538e3c2ccfd79437678511bc6374ae38979fc0
2022-07-14T21:31:25.000Z
[ "pytorch", "canine", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
johanna-k
null
johanna-k/small-pw-test
2
null
transformers
27,467
Entry not found
doraemon1998/t5-small-finetuned-en-to-ro
ef4eb7b6bcc13a3387d83edabd078c972d68498a
2022-07-15T00:46:48.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
doraemon1998
null
doraemon1998/t5-small-finetuned-en-to-ro
2
null
transformers
27,468
Entry not found
doraemon1998/t5-small-finetuned-labels-to-caption
fa69d502357a3efd1b9b25c795c48e8fad806c10
2022-07-15T09:53:07.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
doraemon1998
null
doraemon1998/t5-small-finetuned-labels-to-caption
2
null
transformers
27,469
Entry not found
Team-PIXEL/pixel-base-finetuned-masakhaner-hau
e2df9dbb7b5ec2c2fe1ec81c60b677e2ce3c4073
2022-07-15T03:14:32.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-masakhaner-hau
2
null
transformers
27,470
Entry not found
Team-PIXEL/pixel-base-finetuned-masakhaner-ibo
6380a0912a836059dc3c831f4522f98e42d73ca9
2022-07-15T03:18:06.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-masakhaner-ibo
2
null
transformers
27,471
Entry not found
Team-PIXEL/pixel-base-finetuned-masakhaner-kin
8dc9689cad7f91bfccc02023ea96099c96811aae
2022-07-15T03:20:40.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-masakhaner-kin
2
null
transformers
27,472
Entry not found
Team-PIXEL/pixel-base-finetuned-masakhaner-lug
6e33be4b6469c73cf1ec8126d8d520a9908553cd
2022-07-15T03:23:15.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-masakhaner-lug
2
null
transformers
27,473
Entry not found
Team-PIXEL/pixel-base-finetuned-masakhaner-luo
99af8628d7e6a762035fce0c118a679e79bd8e9e
2022-07-15T03:24:58.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-masakhaner-luo
2
null
transformers
27,474
Entry not found
Team-PIXEL/pixel-base-finetuned-masakhaner-swa
3ca6ee1a2084a8891a9fffb9606147ee8a083bcf
2022-07-15T03:29:45.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-masakhaner-swa
2
null
transformers
27,475
Entry not found
Team-PIXEL/pixel-base-finetuned-masakhaner-wol
4be5be9e60e0b0a96c91d8474ff40f6570ffd0f4
2022-07-15T03:31:50.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-masakhaner-wol
2
null
transformers
27,476
Entry not found
WENGSYX/CPMT
4c802109fd35a08b1997349f733b87002db901be
2022-07-15T07:29:56.000Z
[ "pytorch", "bart", "transformers", "license:mit" ]
null
false
WENGSYX
null
WENGSYX/CPMT
2
null
transformers
27,477
---
license: mit
---

Pre-trained models for minority languages are still relatively scarce. Although the Chinese minority-language model CINO shows strong understanding ability, research aimed at generation and translation is still lacking.

CMPT (Chinese Minority Pre-Trained Language Model) is an ultra-deep generative model built on BART with DeepNorm pre-training; its largest configuration has 128+128 layers. It was pre-trained under constraints on more than 10 GB of Chinese, English, Uyghur, Tibetan, and Mongolian corpora, and it offers strong understanding and generation performance.

**Github Link:** https://github.com/WENGSYX/CMPT

## Usage

```python
>>> from modeling_cmpt import BartForConditionalGeneration
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained('./CMTP')
>>> model = BartForConditionalGeneration.from_pretrained('./CMTP')

>>> # Encode a mixed Chinese-English prompt.
>>> input_ids = tokenizer.encode("Hello world, 你好 世界", return_tensors='pt')
>>> pred_ids = model.generate(input_ids, num_beams=4, max_length=20)
>>> # Inspect the tokens of the first generated sequence.
>>> print(tokenizer.convert_ids_to_tokens(pred_ids[0]))
```
Lyla/bert-base-uncased-finetuned-swag
c59c92d1fe04fe59b2c108a3802a246fbf56522f
2022-07-15T10:22:29.000Z
[ "pytorch", "tensorboard", "bert", "multiple-choice", "transformers" ]
multiple-choice
false
Lyla
null
Lyla/bert-base-uncased-finetuned-swag
2
null
transformers
27,478
Entry not found
karsab/distilbert-base-uncased-finetuned-imdb
97f36ecd8969a8ffde6954f68631f88c18725cc9
2022-07-15T12:21:46.000Z
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
karsab
null
karsab/distilbert-base-uncased-finetuned-imdb
2
null
transformers
27,479
Entry not found
lucashu/TcmYiAnBERT
4d3cbdca96e3fb30b392d0a271f5df03200fb195
2022-07-22T14:45:52.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
lucashu
null
lucashu/TcmYiAnBERT
2
null
transformers
27,480
# Overview

A pre-trained model obtained by continuing training a Chinese BERT model for 300 epochs on large-scale Traditional Chinese Medicine (TCM) case-record data.

# Usage

```python
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("lucashu/TcmYiAnBERT")
model = AutoModelForMaskedLM.from_pretrained("lucashu/TcmYiAnBERT")
```
mipatov/NeuroSkeptic
2b461bbe6a2811f53d0f6dc3d5006df7c4ce533b
2022-07-15T17:20:56.000Z
[ "pytorch", "opt", "text-generation", "transformers" ]
text-generation
false
mipatov
null
mipatov/NeuroSkeptic
2
null
transformers
27,481
Entry not found
dspg/distilbert-base-uncased-finetuned-squad
a6945318ab2f4fbc8ecab00b0c60708f4bda33af
2022-07-15T21:34:13.000Z
[ "pytorch", "distilbert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
dspg
null
dspg/distilbert-base-uncased-finetuned-squad
2
null
transformers
27,482
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-squad

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1596

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step  | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.2265        | 1.0   | 5533  | 1.1572          |
| 0.9548        | 2.0   | 11066 | 1.1278          |
| 0.7396        | 3.0   | 16599 | 1.1596          |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0
- Datasets 2.3.2
- Tokenizers 0.12.1
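A minimal extractive-QA sketch, assuming the repo id above is public; the question/context pair is illustrative.

```python
from transformers import pipeline

# Extractive question answering with the SQuAD fine-tuned checkpoint.
qa = pipeline("question-answering",
              model="dspg/distilbert-base-uncased-finetuned-squad")

result = qa(question="What dataset was the model fine-tuned on?",
            context="The model was fine-tuned on the SQuAD dataset for three epochs.")
print(result["answer"], result["score"])
```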
jens-simon/xls-r-300m-sv-2
fb8a3ff80bdf775d462c84d7313b8e4fd8cf283f
2022-07-16T15:49:39.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
jens-simon
null
jens-simon/xls-r-300m-sv-2
2
null
transformers
27,483
Try this.
Aktsvigun/bart-base_abssum_debate_23419
bc29b8ac4d4aa51b0612985efd5f5e13ac33001a
2022-07-16T17:53:02.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_23419
2
null
transformers
27,484
Entry not found
Aktsvigun/bart-base_abssum_debate_705525
c3444ad517ab547127b22c8e16bfbfd100f4993a
2022-07-16T18:07:20.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_705525
2
null
transformers
27,485
Entry not found
Aktsvigun/bart-base_abssum_debate_4837
efb99c544b545f6269bf9b521eb918cdfd3d4405
2022-07-16T18:22:11.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_4837
2
null
transformers
27,486
Entry not found
Aktsvigun/bart-base_abssum_debate_42
a92261765bf74985c4a57b1e98c03712d7cd2133
2022-07-16T18:41:54.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_42
2
null
transformers
27,487
Entry not found
Aktsvigun/bart-base_abssum_debate_919213
e1bdaccd1d2ce6c80a715dfd6a8a37d226d54154
2022-07-16T18:56:14.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_919213
2
null
transformers
27,488
Entry not found
Aktsvigun/bart-base_abssum_debate_9467153
8a63f1e2a8b43a434c63232d8ce920bea4eda307
2022-07-16T19:12:00.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_9467153
2
null
transformers
27,489
Entry not found
Aktsvigun/bart-base_abssum_debate_6585777
d9f8ce9cb9d7d4924b9b42875c5fbe698193afdb
2022-07-16T19:26:25.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_6585777
2
null
transformers
27,490
Entry not found
Aktsvigun/bart-base_abssum_debate_3878022
7cb747b38e75063e3287277e09ad683e8b0b912d
2022-07-16T19:40:59.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_3878022
2
null
transformers
27,491
Entry not found
Aktsvigun/bart-base_abssum_debate_5537116
3288e9cb3207fb5c660d4768b6217743f55ca484
2022-07-16T19:52:47.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_5537116
2
null
transformers
27,492
Entry not found
Aktsvigun/bart-base_abssum_debate_5893459
ebb346056fae13067228655ca3fe1071024f4bfe
2022-07-16T20:06:55.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_5893459
2
null
transformers
27,493
Entry not found
Aktsvigun/bart-base_abssum_debate_8653685
397596a24513f5d28ff6cbc69831474f6c3d5aa3
2022-07-16T20:20:43.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_8653685
2
null
transformers
27,494
Entry not found
Aktsvigun/bart-base_abssum_debate_6880281
5966243dd07c2c8b65d4904b7f7ad63dae110138
2022-07-16T20:34:40.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_6880281
2
null
transformers
27,495
Entry not found
Aktsvigun/bart-base_abssum_debate_9478495
3c87297653ae521faf3a07e14779c69ca63db43c
2022-07-16T20:47:12.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_9478495
2
null
transformers
27,496
Entry not found
Aktsvigun/bart-base_abssum_debate_2930982
710086792c4da7c8520b12de3db6d15eb8437568
2022-07-16T21:03:07.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_2930982
2
null
transformers
27,497
Entry not found
Aktsvigun/bart-base_abssum_debate_7629317
d48c4ed9e9fac7d553ccd698b078628f2fde06c8
2022-07-16T21:17:52.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_7629317
2
null
transformers
27,498
Entry not found
Aktsvigun/bart-base_abssum_debate_4065329
e47ca4b04474901520628384c9dbac305d572695
2022-07-16T21:33:07.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_abssum_debate_4065329
2
null
transformers
27,499
Entry not found