Dataset schema (14 columns per row):

| Column | Type | Range |
|---|---|---|
| modelId | stringlengths | 4-112 |
| sha | stringlengths | 40-40 |
| lastModified | stringlengths | 24-24 |
| tags | sequence | |
| pipeline_tag | stringclasses | 29 values |
| private | bool | 1 class |
| author | stringlengths | 2-38 |
| config | null | |
| id | stringlengths | 4-112 |
| downloads | float64 | 0-36.8M |
| likes | float64 | 0-712 |
| library_name | stringclasses | 17 values |
| `__index_level_0__` | int64 | 0-38.5k |
| readme | stringlengths | 0-186k |
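The rows below follow this schema, one field per line, with the full model card stored in the final `readme` column. A minimal sketch of querying the dump, assuming it has been exported to a local Parquet file (the file name and the export step are assumptions, not part of the dataset):

```python
# Minimal sketch: querying this model-metadata dump with pandas.
# "models.parquet" is a hypothetical local export of the dataset above.
import pandas as pd

df = pd.read_parquet("models.parquet")

# The 14 columns from the schema table above
print(df.dtypes)

# Example: conversational models that actually ship a model card
has_card = (df["pipeline_tag"] == "conversational") & (df["readme"] != "Entry not found")
print(df.loc[has_card, ["modelId", "author", "lastModified", "library_name"]])
```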
WikinewsSum/bert2bert-multi-fr-wiki-news
1ce94b46c6844911f81e436286dda2b701d7878a
2020-08-11T09:05:51.000Z
[ "pytorch", "encoder-decoder", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
WikinewsSum
null
WikinewsSum/bert2bert-multi-fr-wiki-news
1
null
transformers
28,500
Entry not found
Wintermute/Wintermute_extended
6542afcb93569c7612a8b69175e48402e0d3c1e1
2021-05-21T11:42:01.000Z
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
false
Wintermute
null
Wintermute/Wintermute_extended
1
null
transformers
28,501
Entry not found
XuguangAi/DialoGPT-small-Leslie
55c4edf627fe811829983edad20b8a249a08925d
2021-12-03T20:56:53.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
XuguangAi
null
XuguangAi/DialoGPT-small-Leslie
1
null
transformers
28,502
---
tags:
- conversational
---

# Leslie
XuguangAi/DialoGPT-small-Rick
f23061315d267e69e91355b5d516187127388bd3
2021-12-03T18:09:15.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
XuguangAi
null
XuguangAi/DialoGPT-small-Rick
1
null
transformers
28,503
---
tags:
- conversational
---

# Rick
Yankee/test1234
caa8db0e468698f78721f88240c28b8869b3583d
2022-01-29T12:10:10.000Z
[ "pytorch", "conversational" ]
conversational
false
Yankee
null
Yankee/test1234
1
null
null
28,504
---
tags:
- conversational
---

# test
Yixuan/wav2vec2-large-xls-r-300m-turkish-colab
67e1a93e8127c4695c7917e099eefe491ae3a241
2022-01-26T22:54:18.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
Yixuan
null
Yixuan/wav2vec2-large-xls-r-300m-turkish-colab
1
null
transformers
28,505
Entry not found
YusufSahin99/IFIS_ZORK_AI_HORROR
eb47e2fd4d873b8ef79019aa07303f8bc07d24e2
2021-07-14T14:11:24.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
false
YusufSahin99
null
YusufSahin99/IFIS_ZORK_AI_HORROR
1
null
transformers
28,506
---
license: mit
tags:
- generated_from_trainer
model_index:
- name: IFIS_ZORK_AI_HORROR
  results:
  - task:
      name: Causal Language Modeling
      type: text-generation
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# IFIS_ZORK_AI_HORROR

This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- num_epochs: 3

### Training results

### Framework versions

- Transformers 4.8.2
- Pytorch 1.9.0+cu102
- Tokenizers 0.10.3
YusufSahin99/IFIS_ZORK_AI_MODERN
355561a61429099cd97873856319879f1c980490
2021-07-14T15:12:29.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
false
YusufSahin99
null
YusufSahin99/IFIS_ZORK_AI_MODERN
1
null
transformers
28,507
---
license: mit
tags:
- generated_from_trainer
model_index:
- name: IFIS_ZORK_AI_MODERN
  results:
  - task:
      name: Causal Language Modeling
      type: text-generation
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# IFIS_ZORK_AI_MODERN

This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- num_epochs: 3

### Training results

### Framework versions

- Transformers 4.8.2
- Pytorch 1.9.0+cu102
- Tokenizers 0.10.3
YusufSahin99/IFIS_ZORK_AI_SCIFI
0418e093ca52a1cc5d00564738b1f9a87ae77194
2021-07-13T15:34:34.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
false
YusufSahin99
null
YusufSahin99/IFIS_ZORK_AI_SCIFI
1
null
transformers
28,508
---
license: mit
tags:
- generated_from_trainer
model_index:
- name: IFIS_ZORK_AI_SCIFI
  results:
  - task:
      name: Causal Language Modeling
      type: text-generation
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# IFIS_ZORK_AI_SCIFI

This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- num_epochs: 3

### Training results

### Framework versions

- Transformers 4.8.2
- Pytorch 1.9.0+cu102
- Tokenizers 0.10.3
YusufSahin99/Zork_AI_SciFi
7bed41fcd673b2777591243b47c5311975f1fc58
2021-07-13T14:58:01.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
false
YusufSahin99
null
YusufSahin99/Zork_AI_SciFi
1
null
transformers
28,509
---
license: mit
tags:
- generated_from_trainer
model_index:
- name: Zork_AI_SciFi
  results:
  - task:
      name: Causal Language Modeling
      type: text-generation
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Zork_AI_SciFi

This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- num_epochs: 3

### Training results

### Framework versions

- Transformers 4.8.2
- Pytorch 1.9.0+cu102
- Tokenizers 0.10.3
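None of the four YusufSahin99 cards above shows how to run the models. Since they are GPT-2 fine-tunes, the standard transformers text-generation pipeline should apply; a sketch (the prompt is made up, and any of the four checkpoints can be substituted):

```python
# Sketch: sampling from one of the GPT-2 fine-tunes listed above.
# The prompt is an invented example in the spirit of the ZORK theme.
from transformers import pipeline

generator = pipeline("text-generation", model="YusufSahin99/IFIS_ZORK_AI_HORROR")
out = generator("You are standing in an open field west of a white house.",
                max_length=60, do_sample=True, top_p=0.95)
print(out[0]["generated_text"])
```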
ZYW/squad-mbart-model
a1ebd77f18a0c18f2c11846806ff1ab0b054d50c
2021-05-30T16:12:15.000Z
[ "pytorch", "mbart", "question-answering", "transformers", "model-index", "autotrain_compatible" ]
question-answering
false
ZYW
null
ZYW/squad-mbart-model
1
null
transformers
28,510
---
model-index:
- name: squad-mbart-model
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# squad-mbart-model

This model was trained from scratch on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

### Framework versions

- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
ZYW/squad-mbert-en-de-es-model
bc103b41234e2c595115dd0dbfb5d948592945a5
2021-05-30T22:33:10.000Z
[ "pytorch", "bert", "question-answering", "transformers", "model-index", "autotrain_compatible" ]
question-answering
false
ZYW
null
ZYW/squad-mbert-en-de-es-model
1
null
transformers
28,511
---
model-index:
- name: squad-mbert-en-de-es-model
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# squad-mbert-en-de-es-model

This model was trained from scratch on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

### Framework versions

- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
ZYW/squad-mbert-en-de-es-vi-zh-model
12212d870a7a876b76bfceb07b84d12bc813e291
2021-05-31T05:43:16.000Z
[ "pytorch", "bert", "question-answering", "transformers", "model-index", "autotrain_compatible" ]
question-answering
false
ZYW
null
ZYW/squad-mbert-en-de-es-vi-zh-model
1
null
transformers
28,512
---
model-index:
- name: squad-mbert-en-de-es-vi-zh-model
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# squad-mbert-en-de-es-vi-zh-model

This model was trained from scratch on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

### Framework versions

- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
ZYW/squad-mbert-model_2
5e40804ae62d2a302f8d832434388143d3d5f90a
2021-05-30T18:18:37.000Z
[ "pytorch", "bert", "question-answering", "transformers", "model-index", "autotrain_compatible" ]
question-answering
false
ZYW
null
ZYW/squad-mbert-model_2
1
null
transformers
28,513
---
model-index:
- name: squad-mbert-model_2
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# squad-mbert-model_2

This model was trained from scratch on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

### Framework versions

- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
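The four ZYW cards above are question-answering checkpoints with no usage section. Extractive QA through the transformers pipeline should work along these lines (a sketch; the question and context are made-up examples):

```python
# Sketch: extractive QA with one of the ZYW checkpoints above.
# Question and context are invented examples.
from transformers import pipeline

qa = pipeline("question-answering", model="ZYW/squad-mbert-en-de-es-model")
result = qa(question="Where does Sarah live?",
            context="My name is Sarah and I live in London.")
print(result)  # {'score': ..., 'start': ..., 'end': ..., 'answer': 'London'}
```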
Zephaus/Chromrepo
3236cbbd330532404c5104deaac14800f2a5dc5b
2022-02-17T05:21:06.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
Zephaus
null
Zephaus/Chromrepo
1
null
transformers
28,514
---
tags:
- conversational
---

# Chrombot
ZhaoyiGUAN/Bert_Fintuning_Test1
404e65e8f4c43e7a20e7e85c2fcc0324bb9088cc
2021-09-27T05:56:04.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
ZhaoyiGUAN
null
ZhaoyiGUAN/Bert_Fintuning_Test1
1
null
transformers
28,515
Entry not found
ZhaoyiGUAN/Bert_cn_finetuning_1
30607826d1e5dce88c3004b21e12b81635eabbb5
2021-09-27T07:49:00.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
ZhaoyiGUAN
null
ZhaoyiGUAN/Bert_cn_finetuning_1
1
null
transformers
28,516
Entry not found
Zichuu/spert
a0e06ac5cfa24803d6394d6bd603caf1c4f08749
2021-11-03T04:45:41.000Z
[ "pytorch", "bert", "transformers" ]
null
false
Zichuu
null
Zichuu/spert
1
null
transformers
28,517
# SpERT

SpERT is the Relation Extraction model [(SpERT) Span-based Entity and Relation Transformer](https://github.com/lavis-nlp/spert). This is the model trained with the CoNLL04 dataset.

## Use

## References

```
Markus Eberts, Adrian Ulges. Span-based Joint Entity and Relation Extraction with Transformer Pre-training. 24th European Conference on Artificial Intelligence, 2020.
```
Zirk/wav2vec2-base-timit-demo-colab
4e3efe59dfc1c927a253d05582a9c526f00c1266
2022-02-18T09:18:19.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Zirk
null
Zirk/wav2vec2-base-timit-demo-colab
1
null
transformers
28,518
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-timit-demo-colab
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-base-timit-demo-colab

This model is a fine-tuned version of [jonatasgrosman/wav2vec2-large-xlsr-53-chinese-zh-cn](https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-chinese-zh-cn) on the None dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.01
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.10.3
ab20211112/distilbert-base-uncased-finetuned-squad
cb173be9554e6f26ea38ee2da654518ca5efe85e
2021-11-16T13:59:38.000Z
[ "pytorch", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
ab20211112
null
ab20211112/distilbert-base-uncased-finetuned-squad
1
null
transformers
28,519
Entry not found
abdouaziiz/soraberta
dbaf2580bce224dcc99c6364a3d86267ceae87dd
2021-09-24T11:31:32.000Z
[ "pytorch", "roberta", "fill-mask", "wo", "arxiv:1907.11692", "transformers", "language-model", "wolof", "autotrain_compatible" ]
fill-mask
false
abdouaziiz
null
abdouaziiz/soraberta
1
null
transformers
28,520
---
language: wo
tags:
- roberta
- language-model
- wo
- wolof
---

# Soraberta: Unsupervised Language Model Pre-training for Wolof

**Soraberta** is a pretrained roberta-base model on the Wolof language. RoBERTa was introduced in [this paper](https://arxiv.org/abs/1907.11692).

## Soraberta models

| Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters |
| :------: | :---: | :---: | :---: | :---: |
| `soraberta-base` | 6 | 12 | 514 | 83 M |

## Using Soraberta with Hugging Face's Transformers

```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='abdouaziiz/soraberta')
>>> unmasker("juroom naari jullit man nanoo boole jend aw nag walla <mask>.")
[{'sequence': 'juroom naari jullit man nanoo boole jend aw nag walla gileem.',
  'score': 0.9783930778503418,
  'token': 4621,
  'token_str': ' gileem'},
 {'sequence': 'juroom naari jullit man nanoo boole jend aw nag walla jend.',
  'score': 0.009271537885069847,
  'token': 2155,
  'token_str': ' jend'},
 {'sequence': 'juroom naari jullit man nanoo boole jend aw nag walla aw.',
  'score': 0.0027585660573095083,
  'token': 704,
  'token_str': ' aw'},
 {'sequence': 'juroom naari jullit man nanoo boole jend aw nag walla pel.',
  'score': 0.001120452769100666,
  'token': 1171,
  'token_str': ' pel'},
 {'sequence': 'juroom naari jullit man nanoo boole jend aw nag walla juum.',
  'score': 0.0005133090307936072,
  'token': 5820,
  'token_str': ' juum'}]
```

## Training data

The data sources are [Bible OT](http://biblewolof.com/) and [WOLOF-ONLINE](http://www.wolof-online.com/).

## Contact

Please contact [email protected] for any question, feedback or request.
abhijithneilabraham/pubmed-summarisation-pegasus
9d188552a2d6efd8206e17a6f4c24f10596d519c
2021-12-16T08:24:30.000Z
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
abhijithneilabraham
null
abhijithneilabraham/pubmed-summarisation-pegasus
1
null
transformers
28,521
Entry not found
abhinema/testauto
2a9729eb29e9470040dcccb6a3e4ac10f386fe4f
2022-01-03T03:39:47.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
abhinema
null
abhinema/testauto
1
null
transformers
28,522
Entry not found
adalbertojunior/test-256-uncased-2
17812ef8e6509a31c1f5d3cedf0b12f1b066f46f
2021-11-23T22:40:09.000Z
[ "pytorch", "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
adalbertojunior
null
adalbertojunior/test-256-uncased-2
1
null
transformers
28,523
Entry not found
adalbertojunior/test-256-uncased
f555a5cbdb19979ac63e1070f063e97c374be93b
2021-10-22T03:46:25.000Z
[ "pytorch", "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
adalbertojunior
null
adalbertojunior/test-256-uncased
1
null
transformers
28,524
Entry not found
adamlin/tus_21-delex_5000
f8d1d183f5e8431655112afa7308c344933be68b
2021-04-08T14:25:30.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
adamlin
null
adamlin/tus_21-delex_5000
1
null
transformers
28,525
Entry not found
adamlin/usr-topicalchat-uk
c381748b06283b39dca2ee9f1596ddb7055b19bf
2021-06-28T13:00:11.000Z
[ "pytorch", "transformers" ]
null
false
adamlin
null
adamlin/usr-topicalchat-uk
1
null
transformers
28,526
Entry not found
addy88/code-t5-ruby
fd048f0605bfcfe6d7b8d591a0caf7afbed4a724
2022-01-02T14:30:57.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
addy88
null
addy88/code-t5-ruby
1
null
transformers
28,527
Entry not found
addy88/wav2vec2-base-timit-english
27caf1ad8b4fe3ebedf2a74ff6ad144f08e850f9
2021-12-09T07:36:20.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
addy88
null
addy88/wav2vec2-base-timit-english
1
null
transformers
28,528
Entry not found
addy88/wav2vec2-gujarati-stt
9bf62903e69d363e3a0d89d9ae29e1679bc9238d
2021-12-19T15:14:38.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
addy88
null
addy88/wav2vec2-gujarati-stt
1
null
transformers
28,529
## Usage

The model can be used directly (without a language model) as follows:

```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import argparse

def parse_transcription(wav_file):
    # load pretrained model
    processor = Wav2Vec2Processor.from_pretrained("addy88/wav2vec2-gujarati-stt")
    model = Wav2Vec2ForCTC.from_pretrained("addy88/wav2vec2-gujarati-stt")

    # load audio
    audio_input, sample_rate = sf.read(wav_file)

    # pad input values and return pt tensor
    input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values

    # INFERENCE
    # retrieve logits & take argmax
    logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)

    # transcribe
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    print(transcription)
```
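The card defines `parse_transcription` but never calls it; a call might look like the line below (the WAV path is hypothetical, and since the helper does no resampling the file should already be 16 kHz mono). The same pattern applies to the punjabi and sanskrit cards that follow.

```python
# Hypothetical invocation of the helper defined in the card above.
# "speech_gu.wav" is an assumed local file; it should be 16 kHz mono,
# because the helper feeds the file's native sample rate to the processor.
parse_transcription("speech_gu.wav")
```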
addy88/wav2vec2-punjabi-stt
75c243f095615c99544f25ab86dca9f38d65a336
2021-12-19T15:04:43.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
addy88
null
addy88/wav2vec2-punjabi-stt
1
null
transformers
28,530
## Usage

The model can be used directly (without a language model) as follows:

```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import argparse

def parse_transcription(wav_file):
    # load pretrained model
    processor = Wav2Vec2Processor.from_pretrained("addy88/wav2vec2-punjabi-stt")
    model = Wav2Vec2ForCTC.from_pretrained("addy88/wav2vec2-punjabi-stt")

    # load audio
    audio_input, sample_rate = sf.read(wav_file)

    # pad input values and return pt tensor
    input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values

    # INFERENCE
    # retrieve logits & take argmax
    logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)

    # transcribe
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    print(transcription)
```
addy88/wav2vec2-sanskrit-stt
f850d9109d3539782596d94cf9ae805b07add340
2021-12-19T13:38:52.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
addy88
null
addy88/wav2vec2-sanskrit-stt
1
null
transformers
28,531
## Usage

The model can be used directly (without a language model) as follows:

```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import argparse

def parse_transcription(wav_file):
    # load pretrained model
    processor = Wav2Vec2Processor.from_pretrained("addy88/wav2vec2-sanskrit-stt")
    model = Wav2Vec2ForCTC.from_pretrained("addy88/wav2vec2-sanskrit-stt")

    # load audio
    audio_input, sample_rate = sf.read(wav_file)

    # pad input values and return pt tensor
    input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values

    # INFERENCE
    # retrieve logits & take argmax
    logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)

    # transcribe
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    print(transcription)
```
adit94/t5_emotion
0d77341f38bea60c2f21c568e7c0430056d5bf63
2021-08-30T12:25:58.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
adit94
null
adit94/t5_emotion
1
null
transformers
28,532
Entry not found
aditeyabaral/additionalpretrained-contrastive-bert-base-cased
658b4b07aabfe56c8374f946ec4bb59c8905ffba
2021-11-14T14:43:02.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
aditeyabaral
null
aditeyabaral/additionalpretrained-contrastive-bert-base-cased
1
null
transformers
28,533
Entry not found
aditeyabaral/additionalpretrained-roberta-base
aa283f25d1c01d16b02c1bc807188961d04d8fb0
2021-10-21T18:03:10.000Z
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
false
aditeyabaral
null
aditeyabaral/additionalpretrained-roberta-base
1
null
transformers
28,534
Entry not found
aditeyabaral/additionalpretrained-roberta-hinglish-big
8e245b78dfc8b7e4f29ce7e9fdbed00e211e0f56
2021-10-20T18:28:00.000Z
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
false
aditeyabaral
null
aditeyabaral/additionalpretrained-roberta-hinglish-big
1
null
transformers
28,535
Entry not found
aditeyabaral/additionalpretrained-xlm-roberta-base
13aa8b3c2f9224a15f2930ae06276a17f23f0cdf
2021-10-24T04:55:30.000Z
[ "pytorch", "xlm-roberta", "feature-extraction", "transformers" ]
feature-extraction
false
aditeyabaral
null
aditeyabaral/additionalpretrained-xlm-roberta-base
1
null
transformers
28,536
Entry not found
aditeyabaral/distilbert-hinglish-big
ef7d10f080c41826d8a504094ccd03f046e62280
2021-10-12T00:18:47.000Z
[ "pytorch", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
aditeyabaral
null
aditeyabaral/distilbert-hinglish-big
1
null
transformers
28,537
Entry not found
aditeyabaral/roberta-hinglish-big
4afc17dccd5210bec6b0262356138f9afa92cb89
2021-09-25T15:22:17.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
aditeyabaral
null
aditeyabaral/roberta-hinglish-big
1
null
transformers
28,538
Entry not found
aditeyabaral/sentencetransformer-distilbert-hinglish-small
f520d4902ff71dba8b5bc6d51e8b778a25142998
2021-10-20T09:04:04.000Z
[ "pytorch", "distilbert", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
aditeyabaral
null
aditeyabaral/sentencetransformer-distilbert-hinglish-small
1
null
sentence-transformers
28,539
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---

# aditeyabaral/sentencetransformer-distilbert-hinglish-small

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('aditeyabaral/sentencetransformer-distilbert-hinglish-small')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('aditeyabaral/sentencetransformer-distilbert-hinglish-small')
model = AutoModel.from_pretrained('aditeyabaral/sentencetransformer-distilbert-hinglish-small')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=aditeyabaral/sentencetransformer-distilbert-hinglish-small)

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 4617 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the fit()-Method:
```
{
    "epochs": 10,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 100,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
aditeyabaral/sentencetransformer-roberta-hinglish-big
1ecbbf08a16ed1c49e7b6585d8cb3a5fd093108f
2021-10-19T22:41:56.000Z
[ "pytorch", "roberta", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
aditeyabaral
null
aditeyabaral/sentencetransformer-roberta-hinglish-big
1
null
sentence-transformers
28,540
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---

# aditeyabaral/sentencetransformer-roberta-hinglish-big

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('aditeyabaral/sentencetransformer-roberta-hinglish-big')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('aditeyabaral/sentencetransformer-roberta-hinglish-big')
model = AutoModel.from_pretrained('aditeyabaral/sentencetransformer-roberta-hinglish-big')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=aditeyabaral/sentencetransformer-roberta-hinglish-big)

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 4617 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the fit()-Method:
```
{
    "epochs": 10,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 100,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: RobertaModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
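Both sentence-transformer cards above stop at printing raw embeddings. A semantic-search step on top of them might look like this (a sketch; the corpus and query are made-up, and either checkpoint can be substituted):

```python
# Sketch: cosine-similarity search over embeddings from the cards above.
# Corpus and query sentences are invented examples.
import torch.nn.functional as F
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('aditeyabaral/sentencetransformer-roberta-hinglish-big')
corpus = ["kal milte hain", "movie bahut achhi thi", "aaj mausam accha hai"]
query = "film kaisi lagi?"

corpus_emb = model.encode(corpus, convert_to_tensor=True)  # shape (3, 768)
query_emb = model.encode(query, convert_to_tensor=True)    # shape (768,)

scores = F.cosine_similarity(query_emb.unsqueeze(0), corpus_emb)  # shape (3,)
best = int(scores.argmax())
print(corpus[best], float(scores[best]))
```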
adresgezgini/wav2vec-tr-lite-AG
868cefaac6e65cd42fd6d3490f3f8e3680cc4093
2021-07-05T18:56:04.000Z
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "tr", "dataset:common_voice", "transformers", "audio", "speech", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
adresgezgini
null
adresgezgini/wav2vec-tr-lite-AG
1
null
transformers
28,541
---
language: tr
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Turkish by Davut Emre TASAR
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice tr
      type: common_voice
      args: tr
    metrics:
    - name: Test WER
      type: wer
---

# wav2vec-tr-lite-AG

## Usage

The model can be used directly (without a language model) as follows:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "tr", split="test[:2%]")

processor = Wav2Vec2Processor.from_pretrained("emre/wav2vec-tr-lite-AG")
model = Wav2Vec2ForCTC.from_pretrained("emre/wav2vec-tr-lite-AG")

resampler = torchaudio.transforms.Resample(48_000, 16_000)
```

**Test Result**: 27.30 %

[here](https://adresgezgini.com)
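The card's snippet ends after building the resampler, before any audio is actually transcribed. The usual continuation in the Common Voice XLSR examples looks roughly like this (a sketch following the standard template, not the author's own code; it reuses `processor`, `model`, `resampler`, and `test_dataset` from the block above):

```python
# Sketch of the inference step the truncated card omits, following the
# standard Common Voice XLSR template rather than the author's code.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000,
                   return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
```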
ahanadeb/wav2vec2-large-indian-instrument-classification-v1
c5c5d27e2843e7f521e83e71d22a1c33299a1994
2021-11-11T18:56:47.000Z
[ "pytorch", "wav2vec2", "transformers" ]
null
false
ahanadeb
null
ahanadeb/wav2vec2-large-indian-instrument-classification-v1
1
null
transformers
28,542
Hello World!
ahazeemi/wav2vec2-base-timit-demo-colab
9298c44e857c972218c0f6ea89646aa34d687e7d
2021-12-04T17:49:06.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
ahazeemi
null
ahazeemi/wav2vec2-base-timit-demo-colab
1
null
transformers
28,543
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-timit-demo-colab
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-base-timit-demo-colab

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.13.3
- Tokenizers 0.10.3
ahmednasserswe/sentence_distilbert
3d7079baf6315ab5b5bc72aaf1bb0deec87e0e31
2020-06-09T09:02:24.000Z
[ "pytorch", "distilbert", "feature-extraction", "transformers" ]
feature-extraction
false
ahmednasserswe
null
ahmednasserswe/sentence_distilbert
1
null
transformers
28,544
Entry not found
aiface/vivos_prj1tha
13d4631e483ad063a3e4fbb664cf465a41fd3098
2022-02-18T11:35:55.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:vivos_dataset", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
aiface
null
aiface/vivos_prj1tha
1
null
transformers
28,545
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- vivos_dataset
model-index:
- name: vivos_prj1tha
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vivos_prj1tha

This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the vivos_dataset dataset. It achieves the following results on the evaluation set:
- Loss: 0.7737
- Wer: 0.5128

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 5.0541 | 10.25 | 400 | 1.0293 | 0.7051 |
| 0.5514 | 20.51 | 800 | 0.7737 | 0.5128 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.10.3
aimiekhe/yummv1
cbee126f36a8c8e06cdf03287e8b213b793dd26e
2021-06-06T02:38:56.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
aimiekhe
null
aimiekhe/yummv1
1
null
transformers
28,546
---
tags:
- conversational
---

# My Awesome Model
aimiekhe/yummv2
f6ec3f678ac78948706b936049b20c9a1593c443
2021-06-06T03:04:24.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
aimiekhe
null
aimiekhe/yummv2
1
null
transformers
28,547
---
tags:
- conversational
---

# My Awesome Model
ajaiswal1008/wav2vec2-large-xls-r-300m-hi-colab_new
4d5a2ff441ff89964d01a3dd6f0a23a772846c6d
2022-02-10T15:11:14.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
ajaiswal1008
null
ajaiswal1008/wav2vec2-large-xls-r-300m-hi-colab_new
1
null
transformers
28,548
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xls-r-300m-hi-colab_new
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-large-xls-r-300m-hi-colab_new

This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.10.3
ajanco/sr_roberta_oscar
445c0576978f0163953c3b9f22419feaf402658d
2022-01-18T03:20:21.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
ajanco
null
ajanco/sr_roberta_oscar
1
null
transformers
28,549
Entry not found
akadriu/wav2vec2-large-xlsr-53-Total_2e-4
4b7700481ec71e67564920bbff079875862311ec
2022-03-03T06:28:22.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
akadriu
null
akadriu/wav2vec2-large-xlsr-53-Total_2e-4
1
null
transformers
28,550
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-large-xlsr-53-Total_2e-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-large-xlsr-53-Total_2e-4

This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 1.2082
- Wer: 0.4355

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 5
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 5.0972 | 0.1 | 200 | 2.9261 | 0.9696 |
| 2.2492 | 0.2 | 400 | 1.4255 | 0.7957 |
| 0.9206 | 0.3 | 600 | 1.1854 | 0.6903 |
| 0.8152 | 0.4 | 800 | 1.2227 | 0.6072 |
| 0.7284 | 0.5 | 1000 | 1.2009 | 0.5694 |
| 0.6503 | 0.6 | 1200 | 1.2563 | 0.5603 |
| 0.643 | 0.7 | 1400 | 1.1556 | 0.5404 |
| 0.6692 | 0.8 | 1600 | 1.2245 | 0.5176 |
| 0.5939 | 0.9 | 1800 | 1.1662 | 0.5116 |
| 0.577 | 1.0 | 2000 | 1.1099 | 0.5128 |
| 0.5118 | 1.1 | 2200 | 1.3127 | 0.4911 |
| 0.5389 | 1.2 | 2400 | 1.0365 | 0.4958 |
| 0.5452 | 1.3 | 2600 | 1.0924 | 0.4840 |
| 0.5072 | 1.4 | 2800 | 1.2285 | 0.4787 |
| 0.514 | 1.5 | 3000 | 1.0627 | 0.4802 |
| 0.5275 | 1.6 | 3200 | 1.0770 | 0.4702 |
| 0.5064 | 1.7 | 3400 | 1.1287 | 0.4709 |
| 0.4837 | 1.8 | 3600 | 1.1389 | 0.4694 |
| 0.4939 | 1.9 | 3800 | 1.0724 | 0.4635 |
| 0.5104 | 2.0 | 4000 | 1.2553 | 0.4604 |
| 0.4439 | 2.1 | 4200 | 1.2482 | 0.4570 |
| 0.4546 | 2.2 | 4400 | 1.2378 | 0.4732 |
| 0.4294 | 2.3 | 4600 | 1.1122 | 0.4519 |
| 0.4533 | 2.4 | 4800 | 1.1338 | 0.4508 |
| 0.4526 | 2.5 | 5000 | 1.2038 | 0.4540 |
| 0.4642 | 2.6 | 5200 | 1.2188 | 0.4635 |
| 0.4403 | 2.7 | 5400 | 1.2394 | 0.4512 |
| 0.4485 | 2.8 | 5600 | 1.0510 | 0.4577 |
| 0.4614 | 2.9 | 5800 | 1.1459 | 0.4451 |
| 0.4233 | 3.0 | 6000 | 1.1758 | 0.4397 |
| 0.4013 | 3.1 | 6200 | 1.0858 | 0.4456 |
| 0.4166 | 3.2 | 6400 | 1.2246 | 0.4420 |
| 0.3998 | 3.3 | 6600 | 1.1516 | 0.4465 |
| 0.4106 | 3.4 | 6800 | 1.2585 | 0.4394 |
| 0.4031 | 3.5 | 7000 | 1.2514 | 0.4419 |
| 0.3858 | 3.6 | 7200 | 1.2545 | 0.4447 |
| 0.393 | 3.7 | 7400 | 1.0103 | 0.4387 |
| 0.3819 | 3.8 | 7600 | 1.1280 | 0.4355 |
| 0.3957 | 3.9 | 7800 | 1.1960 | 0.4476 |
| 0.392 | 4.0 | 8000 | 1.1318 | 0.4461 |
| 0.355 | 4.1 | 8200 | 1.1822 | 0.4387 |
| 0.3377 | 4.2 | 8400 | 1.2258 | 0.4403 |
| 0.353 | 4.3 | 8600 | 1.2232 | 0.4350 |
| 0.3595 | 4.4 | 8800 | 1.1642 | 0.4329 |
| 0.3762 | 4.5 | 9000 | 1.1507 | 0.4365 |
| 0.3455 | 4.6 | 9200 | 1.2259 | 0.4337 |
| 0.3398 | 4.7 | 9400 | 1.2888 | 0.4350 |
| 0.3624 | 4.8 | 9600 | 1.2015 | 0.4364 |
| 0.3392 | 4.9 | 9800 | 1.2045 | 0.4350 |
| 0.339 | 5.0 | 10000 | 1.2082 | 0.4355 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.10.3
akoshel/made-ai-dungeon
f8559c01e873eacbb1278eb24b981f0ae0cb7c66
2021-11-06T07:50:15.000Z
[ "pytorch" ]
null
false
akoshel
null
akoshel/made-ai-dungeon
1
null
null
28,551
Entry not found
akozlo/con_bal60k
72fae7ef4fbb9671c9a19b8a0cb60de1e87a8064
2022-02-14T00:13:51.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
akozlo
null
akozlo/con_bal60k
1
null
transformers
28,552
hello
akshara23/Pegasus_for_Here
5d726f21102839f9992a15f54a77384680fad66b
2021-07-09T17:46:41.000Z
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
akshara23
null
akshara23/Pegasus_for_Here
1
null
transformers
28,553
Entry not found
akshaychaudhary/distilbert-base-uncased-finetuned-cloud-ner
ec8ceb1e9c413f68f8f90d2b3f271847b37ca4cf
2022-02-11T15:00:36.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
akshaychaudhary
null
akshaychaudhary/distilbert-base-uncased-finetuned-cloud-ner
1
null
transformers
28,554
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-cloud-ner
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cloud-ner

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 0.0812
- Precision: 0.8975
- Recall: 0.9080
- F1: 0.9027
- Accuracy: 0.9703

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 166 | 0.1326 | 0.7990 | 0.8043 | 0.8017 | 0.9338 |
| No log | 2.0 | 332 | 0.0925 | 0.8770 | 0.8946 | 0.8858 | 0.9618 |
| No log | 3.0 | 498 | 0.0812 | 0.8975 | 0.9080 | 0.9027 | 0.9703 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
akshaychaudhary/distilbert-base-uncased-finetuned-cloud1-ner
f44b90a4493fbce870448e0a11a0b054097491b2
2022-02-14T13:30:57.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
akshaychaudhary
null
akshaychaudhary/distilbert-base-uncased-finetuned-cloud1-ner
1
null
transformers
28,555
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-cloud1-ner
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cloud1-ner

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 0.0074
- Precision: 0.9714
- Recall: 0.9855
- F1: 0.9784
- Accuracy: 0.9972

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 166 | 0.0160 | 0.9653 | 0.9420 | 0.9535 | 0.9945 |
| No log | 2.0 | 332 | 0.0089 | 0.9623 | 0.9855 | 0.9737 | 0.9965 |
| No log | 3.0 | 498 | 0.0074 | 0.9714 | 0.9855 | 0.9784 | 0.9972 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
akshaychaudhary/distilbert-base-uncased-finetuned-ner
3ff90e6496e62c999b174d2b8d798024037c4a4e
2022-01-31T18:50:20.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
akshaychaudhary
null
akshaychaudhary/distilbert-base-uncased-finetuned-ner
1
null
transformers
28,556
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-ner
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-ner

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set:
- Loss: 0.9988
- Precision: 0.3
- Recall: 0.6
- F1: 0.4
- Accuracy: 0.7870

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 84 | 0.8399 | 0.2105 | 0.4 | 0.2759 | 0.75 |
| No log | 2.0 | 168 | 0.9664 | 0.3 | 0.6 | 0.4 | 0.7870 |
| No log | 3.0 | 252 | 0.9988 | 0.3 | 0.6 | 0.4 | 0.7870 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.2
- Tokenizers 0.11.0
alexrfelicio/t5-small-finetuned16-en-to-de
cb17f9201101aef9d69b0ab6e80d4b9bba0ade42
2021-12-02T23:08:06.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "dataset:wmt16", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
alexrfelicio
null
alexrfelicio/t5-small-finetuned16-en-to-de
1
null
transformers
28,557
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- wmt16
model-index:
- name: t5-small-finetuned16-en-to-de
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# t5-small-finetuned16-en-to-de

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the wmt16 dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| No log | 1.0 | 136 | 2.1906 | 23.3821 | 12.956 |

### Framework versions

- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
alexrfelicio/t5-small-finetuned300-en-to-de
20ea51fd33177801ad718a3c8e876f8a70e1889b
2021-12-02T22:08:10.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "dataset:wmt16", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
alexrfelicio
null
alexrfelicio/t5-small-finetuned300-en-to-de
1
null
transformers
28,558
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- wmt16
model-index:
- name: t5-small-finetuned300-en-to-de
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# t5-small-finetuned300-en-to-de

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the wmt16 dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| No log | 1.0 | 136 | 1.1454 | 14.2319 | 17.8329 |

### Framework versions

- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
alexrfelicio/t5-small-finetuned8-en-to-de
9827cad8ca156f4fc13492c91b10857b9c6e658d
2021-12-03T00:13:25.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "dataset:wmt16", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
alexrfelicio
null
alexrfelicio/t5-small-finetuned8-en-to-de
1
null
transformers
28,559
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- wmt16
model-index:
- name: t5-small-finetuned8-en-to-de
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# t5-small-finetuned8-en-to-de

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the wmt16 dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| No log | 1.0 | 136 | 3.6717 | 3.9127 | 4.0207 |

### Framework versions

- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
alexyalunin/RuBioBERT
048cc02fb23c7035d7c635e820c3d3cd4af85cd3
2022-01-24T16:30:44.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
alexyalunin
null
alexyalunin/RuBioBERT
1
null
transformers
28,560
Entry not found
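The RuBioBERT record ships no card, so usage is a guess from its `fill-mask` pipeline tag and `bert` architecture tag alone; the Russian prompt below is invented for illustration:

```python
# Sketch only: the record gives a fill-mask pipeline tag but no README,
# so both the [MASK] token (standard for BERT) and the example sentence
# are assumptions.
from transformers import pipeline

fill = pipeline("fill-mask", model="alexyalunin/RuBioBERT")
for pred in fill("Пациенту назначен [MASK]."):  # "The patient was prescribed [MASK]."
    print(pred["token_str"], round(pred["score"], 3))
```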
ali2066/finetuned_token_2e-05_16_02_2022-14_15_41
0e87a0ad0394a2bbf099338296ad766a3c37e2cc
2022-02-16T13:18:14.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_16_02_2022-14_15_41
1
null
transformers
28,561
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_16_02_2022-14_15_41
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_16_02_2022-14_15_41
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1746
- Precision: 0.3191
- Recall: 0.3382
- F1: 0.3284
- Accuracy: 0.9439

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.2908 | 0.1104 | 0.1905 | 0.1398 | 0.8731 |
| No log | 2.0 | 76 | 0.2253 | 0.1682 | 0.3206 | 0.2206 | 0.9114 |
| No log | 3.0 | 114 | 0.2041 | 0.2069 | 0.3444 | 0.2585 | 0.9249 |
| No log | 4.0 | 152 | 0.1974 | 0.2417 | 0.3603 | 0.2894 | 0.9269 |
| No log | 5.0 | 190 | 0.1958 | 0.2707 | 0.3683 | 0.3120 | 0.9299 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
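The long run of ali2066 token-classification cards that follows never states what the labels mean, so any usage sketch can only show generic pipeline mechanics:

```python
# Minimal sketch for the token-classification checkpoints in this block.
# The cards do not document the label set, so the output is whatever label
# ids the fine-tuned head emits.
from transformers import pipeline

tagger = pipeline(
    "token-classification",
    model="ali2066/finetuned_token_2e-05_16_02_2022-14_15_41",
    aggregation_strategy="simple",  # merge word pieces into labeled spans
)
print(tagger("Example sentence to tag."))
```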
ali2066/finetuned_token_2e-05_16_02_2022-14_23_23
00b22b82c05a52c686b1377c3098f89cd339e6bd
2022-02-16T13:25:42.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_16_02_2022-14_23_23
1
null
transformers
28,562
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_16_02_2022-14_23_23
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_16_02_2022-14_23_23
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1722
- Precision: 0.3378
- Recall: 0.3615
- F1: 0.3492
- Accuracy: 0.9448

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3781 | 0.1512 | 0.2671 | 0.1931 | 0.8216 |
| No log | 2.0 | 76 | 0.3020 | 0.1748 | 0.2938 | 0.2192 | 0.8551 |
| No log | 3.0 | 114 | 0.2723 | 0.1938 | 0.3339 | 0.2452 | 0.8663 |
| No log | 4.0 | 152 | 0.2574 | 0.2119 | 0.3506 | 0.2642 | 0.8727 |
| No log | 5.0 | 190 | 0.2521 | 0.2121 | 0.3623 | 0.2676 | 0.8756 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_16_02_2022-14_25_47
6385b1a5e7891e6eb5469251bbe52ec0341a466a
2022-02-16T13:28:05.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_16_02_2022-14_25_47
1
null
transformers
28,563
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_16_02_2022-14_25_47
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_16_02_2022-14_25_47
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1722
- Precision: 0.3378
- Recall: 0.3615
- F1: 0.3492
- Accuracy: 0.9448

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3781 | 0.1512 | 0.2671 | 0.1931 | 0.8216 |
| No log | 2.0 | 76 | 0.3020 | 0.1748 | 0.2938 | 0.2192 | 0.8551 |
| No log | 3.0 | 114 | 0.2723 | 0.1938 | 0.3339 | 0.2452 | 0.8663 |
| No log | 4.0 | 152 | 0.2574 | 0.2119 | 0.3506 | 0.2642 | 0.8727 |
| No log | 5.0 | 190 | 0.2521 | 0.2121 | 0.3623 | 0.2676 | 0.8756 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_16_02_2022-14_28_10
0d965ad4cd99ac7a10aea1f068476edcafb04807
2022-02-16T13:30:28.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_16_02_2022-14_28_10
1
null
transformers
28,564
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_16_02_2022-14_28_10
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_16_02_2022-14_28_10
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1722
- Precision: 0.3378
- Recall: 0.3615
- F1: 0.3492
- Accuracy: 0.9448

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3781 | 0.1512 | 0.2671 | 0.1931 | 0.8216 |
| No log | 2.0 | 76 | 0.3020 | 0.1748 | 0.2938 | 0.2192 | 0.8551 |
| No log | 3.0 | 114 | 0.2723 | 0.1938 | 0.3339 | 0.2452 | 0.8663 |
| No log | 4.0 | 152 | 0.2574 | 0.2119 | 0.3506 | 0.2642 | 0.8727 |
| No log | 5.0 | 190 | 0.2521 | 0.2121 | 0.3623 | 0.2676 | 0.8756 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_16_02_2022-14_30_32
000849a7a6eadc53fc171f3cd8926883611ecdab
2022-02-16T13:32:52.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_16_02_2022-14_30_32
1
null
transformers
28,565
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_16_02_2022-14_30_32
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_16_02_2022-14_30_32
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1722
- Precision: 0.3378
- Recall: 0.3615
- F1: 0.3492
- Accuracy: 0.9448

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3781 | 0.1512 | 0.2671 | 0.1931 | 0.8216 |
| No log | 2.0 | 76 | 0.3020 | 0.1748 | 0.2938 | 0.2192 | 0.8551 |
| No log | 3.0 | 114 | 0.2723 | 0.1938 | 0.3339 | 0.2452 | 0.8663 |
| No log | 4.0 | 152 | 0.2574 | 0.2119 | 0.3506 | 0.2642 | 0.8727 |
| No log | 5.0 | 190 | 0.2521 | 0.2121 | 0.3623 | 0.2676 | 0.8756 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_16_02_2022-14_35_19
6aece39a62b65c9047c301e9a53acdf5ad383057
2022-02-16T13:37:37.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_16_02_2022-14_35_19
1
null
transformers
28,566
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_16_02_2022-14_35_19
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_16_02_2022-14_35_19
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1722
- Precision: 0.3378
- Recall: 0.3615
- F1: 0.3492
- Accuracy: 0.9448

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3781 | 0.1512 | 0.2671 | 0.1931 | 0.8216 |
| No log | 2.0 | 76 | 0.3020 | 0.1748 | 0.2938 | 0.2192 | 0.8551 |
| No log | 3.0 | 114 | 0.2723 | 0.1938 | 0.3339 | 0.2452 | 0.8663 |
| No log | 4.0 | 152 | 0.2574 | 0.2119 | 0.3506 | 0.2642 | 0.8727 |
| No log | 5.0 | 190 | 0.2521 | 0.2121 | 0.3623 | 0.2676 | 0.8756 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-15_41_15
a156441da779ba3b3eb3bbb15ff89069ecbbf5b1
2022-02-16T14:43:38.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-15_41_15
1
null
transformers
28,567
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-15_41_15
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-15_41_15
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1742
- Precision: 0.3447
- Recall: 0.3410
- F1: 0.3428
- Accuracy: 0.9455

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3692 | 0.0868 | 0.2030 | 0.1216 | 0.8238 |
| No log | 2.0 | 76 | 0.3198 | 0.1674 | 0.3029 | 0.2157 | 0.8567 |
| No log | 3.0 | 114 | 0.3156 | 0.1520 | 0.3096 | 0.2039 | 0.8510 |
| No log | 4.0 | 152 | 0.3129 | 0.1753 | 0.3266 | 0.2281 | 0.8500 |
| No log | 5.0 | 190 | 0.3038 | 0.1716 | 0.3401 | 0.2281 | 0.8595 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-15_43_42
e19257727c4a0e84e938fa684e630d76fa05275d
2022-02-16T14:46:02.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-15_43_42
1
null
transformers
28,568
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-15_43_42
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-15_43_42
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-15_46_07
a4e622bc3b37d0a17579efca4ac6783c41c48d74
2022-02-16T14:48:28.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-15_46_07
1
null
transformers
28,569
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-15_46_07
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-15_46_07
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-15_48_32
06c5948ea8178c98599daf101c3f09c79079e393
2022-02-16T14:50:50.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-15_48_32
1
null
transformers
28,570
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-15_48_32
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-15_48_32
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-15_50_54
f24944c4c3adb4afd97fca9094d2d7a3f9db0b73
2022-02-16T14:53:12.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-15_50_54
1
null
transformers
28,571
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-15_50_54
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-15_50_54
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-15_53_17
7dd54a510d0a302c5261bbc35cd86c44a2559f96
2022-02-16T14:56:28.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-15_53_17
1
null
transformers
28,572
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-15_53_17
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-15_53_17
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-15_56_33
2024c8521acbe5978bff566f1ba726bf3b72d8df
2022-02-16T14:59:46.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-15_56_33
1
null
transformers
28,573
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-15_56_33
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-15_56_33
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-15_59_50
50973bda8898fd0daa24a6666ef69184e8e2596c
2022-02-16T15:03:01.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-15_59_50
1
null
transformers
28,574
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-15_59_50
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-15_59_50
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-16_03_05
a78a5b4dbe982b837d2b4575f80e333b09064e82
2022-02-16T15:06:16.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-16_03_05
1
null
transformers
28,575
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-16_03_05
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-16_03_05
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_2e-05_all_16_02_2022-16_06_20
763c926722bd1449f5611326c66a781a053b8443
2022-02-16T15:09:31.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_2e-05_all_16_02_2022-16_06_20
1
null
transformers
28,576
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_2e-05_all_16_02_2022-16_06_20
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_2e-05_all_16_02_2022-16_06_20
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1750
- Precision: 0.3286
- Recall: 0.3334
- F1: 0.3310
- Accuracy: 0.9447

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3355 | 0.0975 | 0.2358 | 0.1380 | 0.8361 |
| No log | 2.0 | 76 | 0.3177 | 0.1359 | 0.2709 | 0.1810 | 0.8398 |
| No log | 3.0 | 114 | 0.3000 | 0.1542 | 0.3043 | 0.2047 | 0.8471 |
| No log | 4.0 | 152 | 0.3033 | 0.1589 | 0.3060 | 0.2091 | 0.8434 |
| No log | 5.0 | 190 | 0.3029 | 0.1629 | 0.3110 | 0.2138 | 0.8447 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_3e-05_all_16_02_2022-16_09_36
0f288fadd2d8b40326fef9d47cd120750d0d2b42
2022-02-16T15:12:47.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_3e-05_all_16_02_2022-16_09_36
1
null
transformers
28,577
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_3e-05_all_16_02_2022-16_09_36
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_3e-05_all_16_02_2022-16_09_36
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1630
- Precision: 0.3684
- Recall: 0.3714
- F1: 0.3699
- Accuracy: 0.9482

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3339 | 0.1075 | 0.2324 | 0.1470 | 0.8379 |
| No log | 2.0 | 76 | 0.3074 | 0.1589 | 0.2926 | 0.2060 | 0.8489 |
| No log | 3.0 | 114 | 0.2914 | 0.2142 | 0.3278 | 0.2591 | 0.8591 |
| No log | 4.0 | 152 | 0.2983 | 0.1951 | 0.3595 | 0.2529 | 0.8454 |
| No log | 5.0 | 190 | 0.2997 | 0.1851 | 0.3528 | 0.2428 | 0.8487 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
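The Precision/Recall/F1/Accuracy columns across these cards are standard entity-level tagging metrics. As a reference point, a sketch of computing them with `seqeval` on invented BIO sequences; the library choice and the CLAIM label are assumptions, not documented in the cards:

```python
# Hedged sketch: entity-level metrics of the kind reported in these cards,
# computed with seqeval on made-up BIO tags. Labels are hypothetical.
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score

y_true = [["B-CLAIM", "I-CLAIM", "O", "B-CLAIM"]]  # two gold entities
y_pred = [["B-CLAIM", "I-CLAIM", "O", "O"]]        # one predicted, correctly

print("precision:", precision_score(y_true, y_pred))  # 1.0
print("recall:   ", recall_score(y_true, y_pred))     # 0.5
print("f1:       ", f1_score(y_true, y_pred))         # ~0.667
print("accuracy: ", accuracy_score(y_true, y_pred))   # 0.75 (token level)
```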
ali2066/finetuned_token_3e-05_all_16_02_2022-16_12_51
1a9c34499060f2336344c96a521fb057e9fce13c
2022-02-16T15:16:04.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_3e-05_all_16_02_2022-16_12_51
1
null
transformers
28,578
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_3e-05_all_16_02_2022-16_12_51
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_3e-05_all_16_02_2022-16_12_51
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1630
- Precision: 0.3684
- Recall: 0.3714
- F1: 0.3699
- Accuracy: 0.9482

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3339 | 0.1075 | 0.2324 | 0.1470 | 0.8379 |
| No log | 2.0 | 76 | 0.3074 | 0.1589 | 0.2926 | 0.2060 | 0.8489 |
| No log | 3.0 | 114 | 0.2914 | 0.2142 | 0.3278 | 0.2591 | 0.8591 |
| No log | 4.0 | 152 | 0.2983 | 0.1951 | 0.3595 | 0.2529 | 0.8454 |
| No log | 5.0 | 190 | 0.2997 | 0.1851 | 0.3528 | 0.2428 | 0.8487 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_3e-05_all_16_02_2022-16_16_08
6af63a234f3f529c9cf3090731b4047ed160aa0d
2022-02-16T15:19:19.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_3e-05_all_16_02_2022-16_16_08
1
null
transformers
28,579
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_3e-05_all_16_02_2022-16_16_08
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_3e-05_all_16_02_2022-16_16_08
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1630
- Precision: 0.3684
- Recall: 0.3714
- F1: 0.3699
- Accuracy: 0.9482

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3339 | 0.1075 | 0.2324 | 0.1470 | 0.8379 |
| No log | 2.0 | 76 | 0.3074 | 0.1589 | 0.2926 | 0.2060 | 0.8489 |
| No log | 3.0 | 114 | 0.2914 | 0.2142 | 0.3278 | 0.2591 | 0.8591 |
| No log | 4.0 | 152 | 0.2983 | 0.1951 | 0.3595 | 0.2529 | 0.8454 |
| No log | 5.0 | 190 | 0.2997 | 0.1851 | 0.3528 | 0.2428 | 0.8487 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_3e-05_all_16_02_2022-16_22_39
aef4818f02b3e935d9e473b3595bb530990d2c98
2022-02-16T15:25:52.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_3e-05_all_16_02_2022-16_22_39
1
null
transformers
28,580
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_3e-05_all_16_02_2022-16_22_39
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_3e-05_all_16_02_2022-16_22_39
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1630
- Precision: 0.3684
- Recall: 0.3714
- F1: 0.3699
- Accuracy: 0.9482

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3339 | 0.1075 | 0.2324 | 0.1470 | 0.8379 |
| No log | 2.0 | 76 | 0.3074 | 0.1589 | 0.2926 | 0.2060 | 0.8489 |
| No log | 3.0 | 114 | 0.2914 | 0.2142 | 0.3278 | 0.2591 | 0.8591 |
| No log | 4.0 | 152 | 0.2983 | 0.1951 | 0.3595 | 0.2529 | 0.8454 |
| No log | 5.0 | 190 | 0.2997 | 0.1851 | 0.3528 | 0.2428 | 0.8487 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_3e-05_all_16_02_2022-16_25_56
ca466dff28b3e5ed333ee9b4790415c37942c84c
2022-02-16T15:29:08.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_3e-05_all_16_02_2022-16_25_56
1
null
transformers
28,581
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_3e-05_all_16_02_2022-16_25_56
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_3e-05_all_16_02_2022-16_25_56
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1630
- Precision: 0.3684
- Recall: 0.3714
- F1: 0.3699
- Accuracy: 0.9482

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3339 | 0.1075 | 0.2324 | 0.1470 | 0.8379 |
| No log | 2.0 | 76 | 0.3074 | 0.1589 | 0.2926 | 0.2060 | 0.8489 |
| No log | 3.0 | 114 | 0.2914 | 0.2142 | 0.3278 | 0.2591 | 0.8591 |
| No log | 4.0 | 152 | 0.2983 | 0.1951 | 0.3595 | 0.2529 | 0.8454 |
| No log | 5.0 | 190 | 0.2997 | 0.1851 | 0.3528 | 0.2428 | 0.8487 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_3e-05_all_16_02_2022-16_29_13
91ef0bd2f10ce6354604d0dd7c8930705c051708
2022-02-16T15:32:26.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_3e-05_all_16_02_2022-16_29_13
1
null
transformers
28,582
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_3e-05_all_16_02_2022-16_29_13
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_3e-05_all_16_02_2022-16_29_13
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1630
- Precision: 0.3684
- Recall: 0.3714
- F1: 0.3699
- Accuracy: 0.9482

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.3339 | 0.1075 | 0.2324 | 0.1470 | 0.8379 |
| No log | 2.0 | 76 | 0.3074 | 0.1589 | 0.2926 | 0.2060 | 0.8489 |
| No log | 3.0 | 114 | 0.2914 | 0.2142 | 0.3278 | 0.2591 | 0.8591 |
| No log | 4.0 | 152 | 0.2983 | 0.1951 | 0.3595 | 0.2529 | 0.8454 |
| No log | 5.0 | 190 | 0.2997 | 0.1851 | 0.3528 | 0.2428 | 0.8487 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_0.0002_all_16_02_2022-20_14_27
e78c3e63c985082725df94704cbfc7cb5f735e39
2022-02-16T19:16:44.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_0.0002_all_16_02_2022-20_14_27
1
null
transformers
28,583
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_0.0002_all_16_02_2022-20_14_27
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_0.0002_all_16_02_2022-20_14_27
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1588
- Precision: 0.4510
- Recall: 0.5622
- F1: 0.5005
- Accuracy: 0.9477

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 38 | 0.2896 | 0.1483 | 0.1981 | 0.1696 | 0.8745 |
| No log | 2.0 | 76 | 0.2553 | 0.2890 | 0.3604 | 0.3207 | 0.8918 |
| No log | 3.0 | 114 | 0.2507 | 0.246 | 0.4642 | 0.3216 | 0.8925 |
| No log | 4.0 | 152 | 0.2540 | 0.2428 | 0.4792 | 0.3223 | 0.8922 |
| No log | 5.0 | 190 | 0.2601 | 0.2747 | 0.4717 | 0.3472 | 0.8965 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
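The repository names in this block encode an iteration marker, learning rate, corpus and timestamp (e.g. `itr0`, `0.0002`, `all`, `20_14_27`). A small parsing sketch; the pattern is reverse-engineered from the names above, not documented by the author:

```python
# Inferred-only sketch: recover the fields encoded in the ali2066 repo names.
import re

pattern = re.compile(
    r"finetuned_token_(?:itr(?P<itr>\d+)_)?(?P<lr>[0-9.e-]+)_"
    r"(?:(?P<corpus>[A-Za-z]+)_)?"
    r"(?P<stamp>\d{2}_\d{2}_\d{4}-\d{2}_\d{2}_\d{2})"
)
m = pattern.search("ali2066/finetuned_token_itr0_0.0002_essays_16_02_2022-21_04_02")
print(m.groupdict())
# {'itr': '0', 'lr': '0.0002', 'corpus': 'essays', 'stamp': '16_02_2022-21_04_02'}
```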
ali2066/finetuned_token_itr0_0.0002_all_16_02_2022-21_13_10
c716c9fa4cb7873b997af2f9d24ba9453f23f818
2022-02-16T20:15:07.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_0.0002_all_16_02_2022-21_13_10
1
null
transformers
28,584
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_0.0002_all_16_02_2022-21_13_10
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_0.0002_all_16_02_2022-21_13_10
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3057
- Precision: 0.2857
- Recall: 0.4508
- F1: 0.3497
- Accuracy: 0.8741

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 30 | 0.3018 | 0.2097 | 0.2546 | 0.2300 | 0.8727 |
| No log | 2.0 | 60 | 0.2337 | 0.3444 | 0.3652 | 0.3545 | 0.9024 |
| No log | 3.0 | 90 | 0.2198 | 0.3463 | 0.3869 | 0.3655 | 0.9070 |
| No log | 4.0 | 120 | 0.2112 | 0.3757 | 0.4405 | 0.4056 | 0.9173 |
| No log | 5.0 | 150 | 0.2131 | 0.4163 | 0.5126 | 0.4595 | 0.9212 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_0.0002_editorials_16_02_2022-21_07_38
e7894ae526b9fdb639d649fc9206e305543ad1e9
2022-02-16T20:08:50.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_0.0002_editorials_16_02_2022-21_07_38
1
null
transformers
28,585
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_0.0002_editorials_16_02_2022-21_07_38
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_0.0002_editorials_16_02_2022-21_07_38
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1146
- Precision: 0.4662
- Recall: 0.4718
- F1: 0.4690
- Accuracy: 0.9773

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 15 | 0.0756 | 0.2960 | 0.4505 | 0.3573 | 0.9775 |
| No log | 2.0 | 30 | 0.0626 | 0.3615 | 0.4231 | 0.3899 | 0.9808 |
| No log | 3.0 | 45 | 0.0602 | 0.4898 | 0.5275 | 0.5079 | 0.9833 |
| No log | 4.0 | 60 | 0.0719 | 0.5517 | 0.5275 | 0.5393 | 0.9849 |
| No log | 5.0 | 75 | 0.0754 | 0.5765 | 0.5385 | 0.5568 | 0.9849 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_0.0002_essays_16_02_2022-21_04_02
f860f920c0306a71db781a4cf1a7823767d5d473
2022-02-16T20:05:00.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_0.0002_essays_16_02_2022-21_04_02
1
null
transformers
28,586
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_0.0002_essays_16_02_2022-21_04_02
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_0.0002_essays_16_02_2022-21_04_02
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2158
- Precision: 0.5814
- Recall: 0.7073
- F1: 0.6382
- Accuracy: 0.9248

## Model description
More information needed

## Intended uses & limitations
More information needed

## Training and evaluation data
More information needed

## Training procedure

### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| No log | 1.0 | 11 | 0.3920 | 0.4392 | 0.6069 | 0.5096 | 0.8593 |
| No log | 2.0 | 22 | 0.3304 | 0.4282 | 0.6260 | 0.5085 | 0.8672 |
| No log | 3.0 | 33 | 0.3361 | 0.4840 | 0.6336 | 0.5488 | 0.8685 |
| No log | 4.0 | 44 | 0.3258 | 0.5163 | 0.6641 | 0.5810 | 0.8722 |
| No log | 5.0 | 55 | 0.3472 | 0.5192 | 0.6718 | 0.5857 | 0.8743 |

### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_0.0002_webDiscourse_16_02_2022-21_00_50
b4a8017bd5a3c1f69b1359ce2194a759b598afdf
2022-02-16T20:01:47.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_0.0002_webDiscourse_16_02_2022-21_00_50
1
null
transformers
28,587
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_0.0002_webDiscourse_16_02_2022-21_00_50
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_0.0002_webDiscourse_16_02_2022-21_00_50

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5530
- Precision: 0.0044
- Recall: 0.0182
- F1: 0.0071
- Accuracy: 0.7268

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 10 | 0.7051 | 0.0645 | 0.0323 | 0.0430 | 0.4465 |
| No log | 2.0 | 20 | 0.6928 | 0.0476 | 0.0161 | 0.0241 | 0.5546 |
| No log | 3.0 | 30 | 0.6875 | 0.0069 | 0.0484 | 0.0120 | 0.5533 |
| No log | 4.0 | 40 | 0.6966 | 0.0064 | 0.0323 | 0.0107 | 0.5832 |
| No log | 5.0 | 50 | 0.7093 | 0.0061 | 0.0323 | 0.0102 | 0.5742 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_2e-05_all_16_02_2022-20_09_36
322530910efe413020bb55fe7bd71f0be85995b7
2022-02-16T19:11:58.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_2e-05_all_16_02_2022-20_09_36
1
null
transformers
28,588
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_2e-05_all_16_02_2022-20_09_36
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_2e-05_all_16_02_2022-20_09_36

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1743
- Precision: 0.3429
- Recall: 0.3430
- F1: 0.3430
- Accuracy: 0.9446

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 38 | 0.3322 | 0.0703 | 0.1790 | 0.1010 | 0.8318 |
| No log | 2.0 | 76 | 0.2644 | 0.1180 | 0.2343 | 0.1570 | 0.8909 |
| No log | 3.0 | 114 | 0.2457 | 0.1624 | 0.2583 | 0.1994 | 0.8980 |
| No log | 4.0 | 152 | 0.2487 | 0.1486 | 0.2583 | 0.1887 | 0.8931 |
| No log | 5.0 | 190 | 0.2395 | 0.1670 | 0.2694 | 0.2062 | 0.8988 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_2e-05_all_16_02_2022-20_25_06
ef893b55633d55592118b81ec6808e17ce270793
2022-02-16T19:27:31.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_2e-05_all_16_02_2022-20_25_06
1
null
transformers
28,589
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_2e-05_all_16_02_2022-20_25_06
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_2e-05_all_16_02_2022-20_25_06

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1778
- Precision: 0.3270
- Recall: 0.3348
- F1: 0.3309
- Accuracy: 0.9439

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 38 | 0.4023 | 0.1050 | 0.2331 | 0.1448 | 0.8121 |
| No log | 2.0 | 76 | 0.3629 | 0.1856 | 0.3414 | 0.2405 | 0.8368 |
| No log | 3.0 | 114 | 0.3329 | 0.1794 | 0.3594 | 0.2394 | 0.8504 |
| No log | 4.0 | 152 | 0.3261 | 0.1786 | 0.3684 | 0.2405 | 0.8503 |
| No log | 5.0 | 190 | 0.3244 | 0.1872 | 0.3684 | 0.2482 | 0.8534 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_2e-05_all_16_02_2022-20_40_28
f27c8bcf8086c1d4dc21a29b0baadf37ab275c13
2022-02-16T19:42:54.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_2e-05_all_16_02_2022-20_40_28
1
null
transformers
28,590
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_2e-05_all_16_02_2022-20_40_28
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_2e-05_all_16_02_2022-20_40_28

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1736
- Precision: 0.3358
- Recall: 0.3447
- F1: 0.3402
- Accuracy: 0.9452

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 38 | 0.3058 | 0.1200 | 0.2102 | 0.1528 | 0.8629 |
| No log | 2.0 | 76 | 0.2488 | 0.1605 | 0.2774 | 0.2034 | 0.9003 |
| No log | 3.0 | 114 | 0.2296 | 0.1947 | 0.2880 | 0.2324 | 0.9057 |
| No log | 4.0 | 152 | 0.2208 | 0.2201 | 0.2986 | 0.2534 | 0.9113 |
| No log | 5.0 | 190 | 0.2235 | 0.2110 | 0.3039 | 0.2491 | 0.9101 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_2e-05_essays_16_02_2022-21_01_51
863b750f6844623f943c5dd9b2ab4717ccc3e292
2022-02-16T20:02:54.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_2e-05_essays_16_02_2022-21_01_51
1
null
transformers
28,591
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_2e-05_essays_16_02_2022-21_01_51
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_2e-05_essays_16_02_2022-21_01_51

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2525
- Precision: 0.3997
- Recall: 0.5117
- F1: 0.4488
- Accuracy: 0.9115

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 11 | 0.4652 | 0.1528 | 0.3588 | 0.2144 | 0.7851 |
| No log | 2.0 | 22 | 0.3646 | 0.2913 | 0.4847 | 0.3639 | 0.8521 |
| No log | 3.0 | 33 | 0.3453 | 0.3789 | 0.5611 | 0.4523 | 0.8708 |
| No log | 4.0 | 44 | 0.3270 | 0.3673 | 0.5496 | 0.4404 | 0.8729 |
| No log | 5.0 | 55 | 0.3268 | 0.4011 | 0.5725 | 0.4717 | 0.8760 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
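## How to use (illustrative sketch)

For finer control than the pipeline API, the following is a hedged, lower-level inference sketch. It assumes the checkpoint and its saved `id2label` mapping are on the Hub under this record's model id; the example sentence is invented, since the card ships no sample data:

```python
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

repo_id = "ali2066/finetuned_token_itr0_2e-05_essays_16_02_2022-21_01_51"  # from this record
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForTokenClassification.from_pretrained(repo_id)

text = "School uniforms should be optional."  # hypothetical example input
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Take the highest-scoring label per token; id2label is read from the saved config.
predicted_ids = logits.argmax(dim=-1)[0]
for token, label_id in zip(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]), predicted_ids):
    print(token, model.config.id2label[int(label_id)])
```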
ali2066/finetuned_token_itr0_2e-05_webDiscourse_16_02_2022-20_58_45
30cd42f1f99919ae82947c631eb3839b36b4c320
2022-02-16T19:59:45.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_2e-05_webDiscourse_16_02_2022-20_58_45
1
null
transformers
28,592
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_2e-05_webDiscourse_16_02_2022-20_58_45
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_2e-05_webDiscourse_16_02_2022-20_58_45

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6373
- Precision: 0.0024
- Recall: 0.0072
- F1: 0.0036
- Accuracy: 0.6329

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:---:|:--------:|
| No log | 1.0 | 10 | 0.5913 | 0.0 | 0.0 | 0.0 | 0.7023 |
| No log | 2.0 | 20 | 0.5833 | 0.0 | 0.0 | 0.0 | 0.7062 |
| No log | 3.0 | 30 | 0.5717 | 0.0 | 0.0 | 0.0 | 0.7059 |
| No log | 4.0 | 40 | 0.5696 | 0.0 | 0.0 | 0.0 | 0.7008 |
| No log | 5.0 | 50 | 0.5669 | 0.0 | 0.0 | 0.0 | 0.7010 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_3e-05_all_16_02_2022-20_12_04
69b8a2e9a9b7da13f4e2238ecf1e5739bd376269
2022-02-16T19:14:21.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_3e-05_all_16_02_2022-20_12_04
1
null
transformers
28,593
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_3e-05_all_16_02_2022-20_12_04
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_3e-05_all_16_02_2022-20_12_04

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1620
- Precision: 0.3509
- Recall: 0.3793
- F1: 0.3646
- Accuracy: 0.9468

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 38 | 0.2997 | 0.1125 | 0.2057 | 0.1454 | 0.8669 |
| No log | 2.0 | 76 | 0.2620 | 0.1928 | 0.2849 | 0.2300 | 0.8899 |
| No log | 3.0 | 114 | 0.2497 | 0.1923 | 0.2906 | 0.2314 | 0.8918 |
| No log | 4.0 | 152 | 0.2474 | 0.1819 | 0.3377 | 0.2365 | 0.8905 |
| No log | 5.0 | 190 | 0.2418 | 0.2128 | 0.3264 | 0.2576 | 0.8997 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_3e-05_all_16_02_2022-20_27_36
423456efa9ffc86ddc951274237bdb18385d5edc
2022-02-16T19:29:55.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_3e-05_all_16_02_2022-20_27_36
1
null
transformers
28,594
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_3e-05_all_16_02_2022-20_27_36
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_3e-05_all_16_02_2022-20_27_36

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1633
- Precision: 0.3632
- Recall: 0.3786
- F1: 0.3707
- Accuracy: 0.9482

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 38 | 0.3227 | 0.1237 | 0.2397 | 0.1631 | 0.8566 |
| No log | 2.0 | 76 | 0.2874 | 0.2128 | 0.3328 | 0.2596 | 0.8721 |
| No log | 3.0 | 114 | 0.2762 | 0.2170 | 0.3603 | 0.2709 | 0.8844 |
| No log | 4.0 | 152 | 0.2770 | 0.2274 | 0.3690 | 0.2814 | 0.8819 |
| No log | 5.0 | 190 | 0.2771 | 0.2113 | 0.3741 | 0.2701 | 0.8823 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_3e-05_all_16_02_2022-20_43_00
dd5e81efb78b2458f7f80fda1c56afcca7c48aa3
2022-02-16T19:45:21.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_3e-05_all_16_02_2022-20_43_00
1
null
transformers
28,595
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_3e-05_all_16_02_2022-20_43_00
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_3e-05_all_16_02_2022-20_43_00

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1626
- Precision: 0.3811
- Recall: 0.3865
- F1: 0.3838
- Accuracy: 0.9482

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 38 | 0.3697 | 0.0933 | 0.2235 | 0.1317 | 0.8259 |
| No log | 2.0 | 76 | 0.3193 | 0.1266 | 0.2948 | 0.1771 | 0.8494 |
| No log | 3.0 | 114 | 0.3025 | 0.1606 | 0.3160 | 0.2130 | 0.8540 |
| No log | 4.0 | 152 | 0.2978 | 0.1867 | 0.3449 | 0.2422 | 0.8605 |
| No log | 5.0 | 190 | 0.2984 | 0.1706 | 0.3507 | 0.2295 | 0.8551 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_3e-05_all_16_02_2022-21_11_08
23b702f5b6cef35083f21d052f5f884ebcce1432
2022-02-16T20:13:06.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_3e-05_all_16_02_2022-21_11_08
1
null
transformers
28,596
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_3e-05_all_16_02_2022-21_11_08
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_3e-05_all_16_02_2022-21_11_08

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2731
- Precision: 0.1928
- Recall: 0.3457
- F1: 0.2475
- Accuracy: 0.8826

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 30 | 0.3010 | 0.1330 | 0.2345 | 0.1697 | 0.8707 |
| No log | 2.0 | 60 | 0.2446 | 0.1739 | 0.2948 | 0.2188 | 0.8949 |
| No log | 3.0 | 90 | 0.2235 | 0.2446 | 0.3032 | 0.2708 | 0.9080 |
| No log | 4.0 | 120 | 0.2226 | 0.2670 | 0.3350 | 0.2972 | 0.9058 |
| No log | 5.0 | 150 | 0.2166 | 0.2779 | 0.3417 | 0.3065 | 0.9063 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_3e-05_editorials_16_02_2022-21_06_22
329ea18c807ce0a25e8c7315406c7252b8252abd
2022-02-16T20:07:34.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_3e-05_editorials_16_02_2022-21_06_22
1
null
transformers
28,597
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_3e-05_editorials_16_02_2022-21_06_22
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_3e-05_editorials_16_02_2022-21_06_22

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1060
- Precision: 0.2003
- Recall: 0.1154
- F1: 0.1464
- Accuracy: 0.9712

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 15 | 0.0897 | 0.08 | 0.0110 | 0.0193 | 0.9801 |
| No log | 2.0 | 30 | 0.0798 | 0.08 | 0.0110 | 0.0193 | 0.9801 |
| No log | 3.0 | 45 | 0.0743 | 0.08 | 0.0110 | 0.0193 | 0.9801 |
| No log | 4.0 | 60 | 0.0707 | 0.0741 | 0.0110 | 0.0191 | 0.9802 |
| No log | 5.0 | 75 | 0.0696 | 0.2727 | 0.1648 | 0.2055 | 0.9805 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_3e-05_essays_16_02_2022-21_02_59
e8ebfac665ccb1270df4e032bc82442c0e109a09
2022-02-16T20:03:57.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_3e-05_essays_16_02_2022-21_02_59
1
null
transformers
28,598
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_3e-05_essays_16_02_2022-21_02_59
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_3e-05_essays_16_02_2022-21_02_59

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2374
- Precision: 0.4766
- Recall: 0.5549
- F1: 0.5127
- Accuracy: 0.9173

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 11 | 0.4155 | 0.1569 | 0.3168 | 0.2099 | 0.8163 |
| No log | 2.0 | 22 | 0.3584 | 0.3827 | 0.5725 | 0.4587 | 0.8691 |
| No log | 3.0 | 33 | 0.3483 | 0.4353 | 0.5649 | 0.4917 | 0.8737 |
| No log | 4.0 | 44 | 0.3187 | 0.4403 | 0.5916 | 0.5049 | 0.8770 |
| No log | 5.0 | 55 | 0.3188 | 0.4463 | 0.6031 | 0.5130 | 0.8806 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
ali2066/finetuned_token_itr0_3e-05_webDiscourse_16_02_2022-20_59_50
7ceb224a788e221b2a54d544599b2530fb60ef39
2022-02-16T20:00:45.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ali2066
null
ali2066/finetuned_token_itr0_3e-05_webDiscourse_16_02_2022-20_59_50
1
null
transformers
28,599
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: finetuned_token_itr0_3e-05_webDiscourse_16_02_2022-20_59_50
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# finetuned_token_itr0_3e-05_webDiscourse_16_02_2022-20_59_50

This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5450
- Precision: 0.0049
- Recall: 0.0146
- F1: 0.0074
- Accuracy: 0.7431

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 10 | 0.6830 | 0.0109 | 0.0323 | 0.0163 | 0.5685 |
| No log | 2.0 | 20 | 0.7187 | 0.0256 | 0.0323 | 0.0286 | 0.5668 |
| No log | 3.0 | 30 | 0.6839 | 0.0076 | 0.0484 | 0.0131 | 0.5848 |
| No log | 4.0 | 40 | 0.6988 | 0.0092 | 0.0484 | 0.0155 | 0.5918 |
| No log | 5.0 | 50 | 0.7055 | 0.0100 | 0.0484 | 0.0165 | 0.5946 |

### Framework versions

- Transformers 4.15.0
- Pytorch 1.10.1+cu113
- Datasets 1.18.0
- Tokenizers 0.10.3
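## Reproducing the configuration (illustrative sketch)

The hyperparameter list above maps directly onto `TrainingArguments`. A hedged sketch of that mapping follows; the output directory is a placeholder, and everything the card leaves undocumented (dataset, preprocessing, label set, metric code) is omitted:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="finetuned_token_webDiscourse",  # placeholder name, not from the card
    learning_rate=3e-05,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    # Adam with betas=(0.9, 0.999) and epsilon=1e-08, as listed in the card,
    # is the Trainer's default optimizer configuration.
)
```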