| Column | Type | Length / value range |
|---|---|---|
| `modelId` | string | lengths 4–112 |
| `sha` | string | length 40 |
| `lastModified` | string | length 24 |
| `tags` | list | |
| `pipeline_tag` | string | 29 distinct values |
| `private` | bool | 1 distinct value |
| `author` | string | lengths 2–38 |
| `config` | null | |
| `id` | string | lengths 4–112 |
| `downloads` | float64 | 0–36.8M |
| `likes` | float64 | 0–712 |
| `library_name` | string | 17 distinct values |
| `__index_level_0__` | int64 | 0–38.5k |
| `readme` | string | lengths 0–186k |
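For orientation, a hypothetical Python record type mirroring the columns above; field names and types follow the schema, but nothing in this sketch is part of the dataset itself:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class ModelRow:
    """One row of the listing below; purely illustrative."""
    modelId: str                 # e.g. "mbshr/urt5-base"
    sha: str                     # 40-character commit hash
    lastModified: str            # 24-character ISO 8601 timestamp
    tags: list[str]
    pipeline_tag: Optional[str]  # one of 29 task names, or None
    private: bool
    author: str
    config: Optional[dict]       # null for every row shown here
    id: str                      # duplicates modelId
    downloads: float
    likes: Optional[float]
    library_name: Optional[str]  # one of 17 libraries, e.g. "transformers"
    index_level_0: int           # the "__index_level_0__" column
    readme: str                  # raw model card text; often "Entry not found"
```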
mbshr/urt5-base
1396b3e791a3a2249b7f7670c3b711a8888ee970
2022-06-26T17:13:24.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
mbshr
null
mbshr/urt5-base
6
null
transformers
15,800
Entry not found
Moo/kobart-counsel-sum
d3faf0ada35dd49d4adf9489805425ab0079ef29
2022-06-27T02:13:57.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
Moo
null
Moo/kobart-counsel-sum
6
null
transformers
15,801
--- license: apache-2.0 ---
Parsa/Drug_Induced_Liver_Injury_classification
50993e7c3647ac12e62646ffb6aab303432a07e2
2022-06-27T03:47:17.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
Parsa
null
Parsa/Drug_Induced_Liver_Injury_classification
6
null
transformers
15,802
Entry not found
ajders/distilled_wav2vec2_xls_r_300m
bf3547d7501359440fc05dbff328b0a45510fc14
2022-07-04T13:18:25.000Z
[ "pytorch", "wav2vec2", "pretraining", "transformers" ]
null
false
ajders
null
ajders/distilled_wav2vec2_xls_r_300m
6
null
transformers
15,803
Entry not found
sanskar/JewelleryReviews
bf1b40bca7815ca8419db7cd3d08293f51d2fca0
2022-06-27T13:42:08.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
sanskar
null
sanskar/JewelleryReviews
6
null
transformers
15,804
Entry not found
annahaz/distilbert-base-multilingual-cased-finetuned-misogyny-multilingual
2c7c7a0bbce67be716df042710b36c90209bef37
2022-06-28T22:01:28.000Z
[ "pytorch", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
annahaz
null
annahaz/distilbert-base-multilingual-cased-finetuned-misogyny-multilingual
6
null
transformers
15,805
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: distilbert-base-multilingual-cased-finetuned-misogyny-multilingual results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-multilingual-cased-finetuned-misogyny-multilingual This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.9917 - Accuracy: 0.8808 - F1: 0.7543 - Precision: 0.7669 - Recall: 0.7421 - Mae: 0.1192 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | Mae | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:---------:|:------:|:------:| | 0.3366 | 1.0 | 1407 | 0.3297 | 0.8630 | 0.6862 | 0.7886 | 0.6073 | 0.1370 | | 0.2371 | 2.0 | 2814 | 0.3423 | 0.8802 | 0.7468 | 0.7802 | 0.7161 | 0.1198 | | 0.1714 | 3.0 | 4221 | 0.4373 | 0.8749 | 0.7351 | 0.7693 | 0.7039 | 0.1251 | | 0.1161 | 4.0 | 5628 | 0.5584 | 0.8699 | 0.7525 | 0.7089 | 0.8019 | 0.1301 | | 0.0646 | 5.0 | 7035 | 0.7005 | 0.8788 | 0.7357 | 0.7961 | 0.6837 | 0.1212 | | 0.0539 | 6.0 | 8442 | 0.7866 | 0.8710 | 0.7465 | 0.7243 | 0.7702 | 0.1290 | | 0.0336 | 7.0 | 9849 | 0.8967 | 0.8783 | 0.7396 | 0.7828 | 0.7010 | 0.1217 | | 0.0202 | 8.0 | 11256 | 0.9053 | 0.8810 | 0.7472 | 0.7845 | 0.7133 | 0.1190 | | 0.018 | 9.0 | 12663 | 0.9785 | 0.8792 | 0.7478 | 0.7706 | 0.7262 | 0.1208 | | 0.0069 | 10.0 | 14070 | 0.9917 | 0.8808 | 0.7543 | 0.7669 | 0.7421 | 0.1192 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.9.0+cu111 - Datasets 2.3.2 - Tokenizers 0.12.1
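The card above has no usage section; a minimal, hedged inference sketch follows (the checkpoint id comes from the row above, the example sentence is invented):

```python
from transformers import pipeline

# Hedged usage sketch; not part of the original card.
classifier = pipeline(
    "text-classification",
    model="annahaz/distilbert-base-multilingual-cased-finetuned-misogyny-multilingual",
)
print(classifier("An example sentence to score."))  # invented input
```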
Jeevesh8/goog_bert_ft_cola-0
95fef8e89dce02354c26255948bb9a9a80a22f41
2022-06-29T17:31:49.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Jeevesh8
null
Jeevesh8/goog_bert_ft_cola-0
6
null
transformers
15,806
Entry not found
domenicrosati/deberta-xsmall-dapt-scientific-papers-pubmed
da3960403aecaab056295a8555245afade351ffd
2022-06-29T21:30:24.000Z
[ "pytorch", "tensorboard", "deberta-v2", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
domenicrosati
null
domenicrosati/deberta-xsmall-dapt-scientific-papers-pubmed
6
null
transformers
15,807
Entry not found
ps29/distilbert-base-uncased-finetuned-emotion
7dac28b0a21bc74a409727ba58a6e0c0aa1b1a1f
2022-06-30T05:45:12.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:emotion", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
ps29
null
ps29/distilbert-base-uncased-finetuned-emotion
6
null
transformers
15,808
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion args: default metrics: - name: Accuracy type: accuracy value: 0.925 - name: F1 type: f1 value: 0.9249836806712254 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2130 - Accuracy: 0.925 - F1: 0.9250 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8035 | 1.0 | 250 | 0.3075 | 0.908 | 0.9063 | | 0.2445 | 2.0 | 500 | 0.2130 | 0.925 | 0.9250 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
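As a sketch of how the hyperparameters listed in the card above would map onto the standard `TrainingArguments` (the `output_dir` and any omitted options are assumptions):

```python
from transformers import TrainingArguments

# Sketch only; values copied from the card above, output_dir is assumed.
training_args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-emotion",
    learning_rate=2e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=2,
)
```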
abhishek/autotrain-imdbtestmodel-9215210
93e9d340ee9f63cff89d5470b3a451df115822dc
2022-06-30T13:36:05.000Z
[ "pytorch", "bert", "text-classification", "en", "dataset:abhishek/autotrain-data-imdbtestmodel", "transformers", "autotrain", "co2_eq_emissions" ]
text-classification
false
abhishek
null
abhishek/autotrain-imdbtestmodel-9215210
6
null
transformers
15,809
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - abhishek/autotrain-data-imdbtestmodel co2_eq_emissions: 0.2757084122251468 --- # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 9215210 - CO2 Emissions (in grams): 0.2757084122251468 ## Validation Metrics - Loss: 0.1699502319097519 - Accuracy: 0.9372 - Precision: 0.9277551659361303 - Recall: 0.94824 - AUC: 0.9837227744 - F1: 0.9378857414147808 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/abhishek/autotrain-imdbtestmodel-9215210 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("abhishek/autotrain-imdbtestmodel-9215210", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("abhishek/autotrain-imdbtestmodel-9215210", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
Gunulhona/tbSTmodel_v1
f53232ac2f626fa72ed654fc5c3fc91e39a89419
2022-07-02T15:05:19.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Gunulhona
null
Gunulhona/tbSTmodel_v1
6
null
transformers
15,810
Entry not found
Gunulhona/tbscmodel_v1
ebfb9c96759ae4f2898b77ff9ee3423f4680554c
2022-06-30T15:53:21.000Z
[ "pytorch", "bart", "text-classification", "transformers" ]
text-classification
false
Gunulhona
null
Gunulhona/tbscmodel_v1
6
null
transformers
15,811
Entry not found
Evelyn18/distilbert-base-uncased-becas-3
687997b2539b030e878690556b985449a7b5c46a
2022-07-02T03:06:25.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:becasv2", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Evelyn18
null
Evelyn18/distilbert-base-uncased-becas-3
6
null
transformers
15,812
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becas-3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-becas-3 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset. It achieves the following results on the evaluation set: - Loss: 5.9817 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 20 - eval_batch_size: 20 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 4 | 4.7485 | | No log | 2.0 | 8 | 4.9898 | | No log | 3.0 | 12 | 4.5283 | | No log | 4.0 | 16 | 5.2474 | | No log | 5.0 | 20 | 5.7884 | | No log | 6.0 | 24 | 5.7276 | | No log | 7.0 | 28 | 6.1736 | | No log | 8.0 | 32 | 6.2020 | | No log | 9.0 | 36 | 5.9669 | | No log | 10.0 | 40 | 5.9817 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
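A minimal question-answering sketch for the checkpoint described above (the card itself provides no usage example; the question/context pair is invented):

```python
from transformers import pipeline

# Hedged usage sketch; not part of the original card.
qa = pipeline("question-answering", model="Evelyn18/distilbert-base-uncased-becas-3")
result = qa(
    question="¿Quién otorga la beca?",        # invented example
    context="La beca es otorgada por la universidad.",
)
print(result)
```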
Evelyn18/distilbert-base-uncased-becas-6
81d89c4ed20842b7ed409cb5f160638bd0cff66a
2022-07-02T03:41:02.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "dataset:becasv2", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Evelyn18
null
Evelyn18/distilbert-base-uncased-becas-6
6
null
transformers
15,813
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becas-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-becas-6 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset. It achieves the following results on the evaluation set: - Loss: 4.4429 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 4 | 5.7244 | | No log | 2.0 | 8 | 5.3950 | | No log | 3.0 | 12 | 5.1709 | | No log | 4.0 | 16 | 4.9720 | | No log | 5.0 | 20 | 4.7402 | | No log | 6.0 | 24 | 4.5832 | | No log | 7.0 | 28 | 4.5499 | | No log | 8.0 | 32 | 4.5004 | | No log | 9.0 | 36 | 4.4665 | | No log | 10.0 | 40 | 4.4429 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
Smith123/tiny-bert-sst2-distilled_L4_H_512_New
18eabaa91500e12edf22ec7f46e61d2d96930260
2022-07-01T03:54:39.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
Smith123
null
Smith123/tiny-bert-sst2-distilled_L4_H_512_New
6
null
transformers
15,814
Entry not found
tmoodley/rare-bottle
144fa0573afe00fbd4778cdd8abbc49e3cc161b2
2022-07-02T13:21:56.000Z
[ "pytorch", "tensorboard", "vit", "image-classification", "transformers", "huggingpics", "model-index" ]
image-classification
false
tmoodley
null
tmoodley/rare-bottle
6
1
transformers
15,815
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-bottle results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.6770833134651184 --- # rare-bottle Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### Don Julio ![Don Julio](images/Don_Julio.jpg) #### Jack Daniels ![Jack Daniels](images/Jack_Daniels.jpg) #### Southern Comfort ![Southern Comfort](images/Southern_Comfort.jpg) #### bacardi ![bacardi](images/bacardi.jpg) #### johnny walker ![johnny walker](images/johnny_walker.jpg)
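A minimal image-classification sketch for the HuggingPics model above (the image path is a placeholder taken from the card's example images; any local image or URL should work):

```python
from transformers import pipeline

# Hedged usage sketch; not part of the original card. Requires Pillow for image loading.
classifier = pipeline("image-classification", model="tmoodley/rare-bottle")
print(classifier("images/Don_Julio.jpg"))  # placeholder path
```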
BigSalmon/InformalToFormalLincoln54
45a830fbc2bdaa5d17084ee8f618ec107360e941
2022-07-04T01:20:37.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
BigSalmon
null
BigSalmon/InformalToFormalLincoln54
6
null
transformers
15,816
``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln54") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln54") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (Warriors vs. Rockets in Game 7): text: eagerly anticipated by fans, game 7's are the highlight of the post-season. text: ever-building in suspense, game 7's have the crowd captivated. *** Essay Intro (South Korean TV Is Becoming Popular): text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ). text: increasingly held in critical esteem, south korean television continues to impress. text: at the forefront of quality content, south korea is quickly achieving celebrity status. *** Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? 
https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` - nebraska - unicamerical legislature - different from federal house and senate text: featuring a unicameral legislature, nebraska's political system stands in stark contrast to the federal model, comprised of a house and senate. *** - ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ``` ``` wordy: classical music is becoming less popular more and more. Translate into Concise Text: interest in classic music is fading. *** wordy: ``` ``` sweet: savvy voters ousted him. longer: voters who were informed delivered his defeat. *** sweet: ``` ``` 1: commercial space company spacex plans to launch a whopping 52 flights in 2022. 2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022. 3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights. 4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company. 5: a commercial space company, spacex aims to conduct 52 flights in 2022. *** 1: ``` Keywords to sentences or sentence. 
``` ngos are characterized by: □ voluntary citizens' group that is organized on a local, national or international level □ encourage political participation □ often serve humanitarian functions □ work for social, economic, or environmental change *** what are the drawbacks of living near an airbnb? □ noise □ parking □ traffic □ security □ strangers *** ``` ``` original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung. adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung. *** original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark. adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark. *** original: ``` ``` original: had trouble deciding. translated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation. *** original: ``` ``` input: not loyal 1800s english: ( two-faced / inimical / perfidious / duplicitous / mendacious / double-dealing / shifty ). *** input: ```
abdulmatinomotoso/testing_news
0870ccfb5237b4343ffc53a924eb2dc166b5751d
2022-07-03T20:26:41.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
abdulmatinomotoso
null
abdulmatinomotoso/testing_news
6
null
transformers
15,817
Entry not found
emekaboris/code_t5_small_git_diff
4a29f92f1d14a52598b19727b4940c79c0a99c86
2022-07-04T10:55:38.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
emekaboris
null
emekaboris/code_t5_small_git_diff
6
null
transformers
15,818
Entry not found
AnonymousSub/fpdm_bert_pert_sent_0.01_squad2.0
43d3523e6c9ae35ea9b2b1489d9da2be6fbe33e5
2022-07-06T00:12:22.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
AnonymousSub
null
AnonymousSub/fpdm_bert_pert_sent_0.01_squad2.0
6
null
transformers
15,819
Entry not found
Shunichiro/distilbert-base-uncased-finetuned-squad
fc91b5645074ccbfb7602414f62862106a1935e1
2022-07-22T05:11:33.000Z
[ "pytorch", "tensorboard", "distilbert", "question-answering", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Shunichiro
null
Shunichiro/distilbert-base-uncased-finetuned-squad
6
null
transformers
15,820
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 5.0244 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 60 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 30 | 3.5643 | | No log | 2.0 | 60 | 2.4546 | | No log | 3.0 | 90 | 2.3018 | | No log | 4.0 | 120 | 2.4636 | | No log | 5.0 | 150 | 2.4736 | | No log | 6.0 | 180 | 2.5580 | | No log | 7.0 | 210 | 2.6686 | | No log | 8.0 | 240 | 2.7249 | | No log | 9.0 | 270 | 3.2596 | | No log | 10.0 | 300 | 3.5904 | | No log | 11.0 | 330 | 3.6709 | | No log | 12.0 | 360 | 3.6431 | | No log | 13.0 | 390 | 3.6343 | | No log | 14.0 | 420 | 3.8316 | | No log | 15.0 | 450 | 3.6363 | | No log | 16.0 | 480 | 3.8468 | | 0.8931 | 17.0 | 510 | 3.7114 | | 0.8931 | 18.0 | 540 | 3.8719 | | 0.8931 | 19.0 | 570 | 4.0872 | | 0.8931 | 20.0 | 600 | 4.2989 | | 0.8931 | 21.0 | 630 | 4.5494 | | 0.8931 | 22.0 | 660 | 4.2565 | | 0.8931 | 23.0 | 690 | 4.3009 | | 0.8931 | 24.0 | 720 | 4.1816 | | 0.8931 | 25.0 | 750 | 4.2583 | | 0.8931 | 26.0 | 780 | 4.2276 | | 0.8931 | 27.0 | 810 | 4.3481 | | 0.8931 | 28.0 | 840 | 4.4369 | | 0.8931 | 29.0 | 870 | 4.4891 | | 0.8931 | 30.0 | 900 | 4.5521 | | 0.8931 | 31.0 | 930 | 4.5201 | | 0.8931 | 32.0 | 960 | 4.6323 | | 0.8931 | 33.0 | 990 | 4.4766 | | 0.0297 | 34.0 | 1020 | 4.7612 | | 0.0297 | 35.0 | 1050 | 4.9057 | | 0.0297 | 36.0 | 1080 | 4.7580 | | 0.0297 | 37.0 | 1110 | 4.6351 | | 0.0297 | 38.0 | 1140 | 4.6495 | | 0.0297 | 39.0 | 1170 | 4.5980 | | 0.0297 | 40.0 | 1200 | 4.6370 | | 0.0297 | 41.0 | 1230 | 4.6523 | | 0.0297 | 42.0 | 1260 | 4.5802 | | 0.0297 | 43.0 | 1290 | 4.6304 | | 0.0297 | 44.0 | 1320 | 4.7111 | | 0.0297 | 45.0 | 1350 | 4.7219 | | 0.0297 | 46.0 | 1380 | 4.7323 | | 0.0297 | 47.0 | 1410 | 4.9115 | | 0.0297 | 48.0 | 1440 | 4.7873 | | 0.0297 | 49.0 | 1470 | 4.9340 | | 0.0023 | 50.0 | 1500 | 5.0638 | | 0.0023 | 51.0 | 1530 | 5.0750 | | 0.0023 | 52.0 | 1560 | 4.9338 | | 0.0023 | 53.0 | 1590 | 4.9197 | | 0.0023 | 54.0 | 1620 | 4.9282 | | 0.0023 | 55.0 | 1650 | 5.0038 | | 0.0023 | 56.0 | 1680 | 4.9848 | | 0.0023 | 57.0 | 1710 | 4.9932 | | 0.0023 | 58.0 | 1740 | 5.0134 | | 0.0023 | 59.0 | 1770 | 5.0303 | | 0.0023 | 60.0 | 1800 | 5.0244 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Tokenizers 0.12.1
vinayak361/token_fine_tunned_flipkart
2520e1d74b4fb54f8f050245745c94fb2717371f
2022-07-06T09:32:50.000Z
[ "pytorch", "tensorboard", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
vinayak361
null
vinayak361/token_fine_tunned_flipkart
6
null
transformers
15,821
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: token_fine_tunned_flipkart results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # token_fine_tunned_flipkart This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0992 - Precision: 0.9526 - Recall: 0.9669 - F1: 0.9597 - Accuracy: 0.9730 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 135 | 0.5967 | 0.7227 | 0.7830 | 0.7516 | 0.7932 | | No log | 2.0 | 270 | 0.3673 | 0.8105 | 0.8623 | 0.8356 | 0.8747 | | No log | 3.0 | 405 | 0.2679 | 0.8676 | 0.8854 | 0.8764 | 0.9094 | | 0.6219 | 4.0 | 540 | 0.1972 | 0.8955 | 0.9217 | 0.9084 | 0.9355 | | 0.6219 | 5.0 | 675 | 0.1500 | 0.9229 | 0.9374 | 0.9301 | 0.9525 | | 0.6219 | 6.0 | 810 | 0.1240 | 0.9341 | 0.9509 | 0.9424 | 0.9609 | | 0.6219 | 7.0 | 945 | 0.1041 | 0.9516 | 0.9650 | 0.9582 | 0.9720 | | 0.2085 | 8.0 | 1080 | 0.0992 | 0.9526 | 0.9669 | 0.9597 | 0.9730 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu102 - Datasets 2.2.2 - Tokenizers 0.12.1
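A minimal token-classification sketch for the checkpoint above (the query string and the aggregation strategy are assumptions, since the card omits a usage section):

```python
from transformers import pipeline

# Hedged usage sketch; not part of the original card.
ner = pipeline(
    "token-classification",
    model="vinayak361/token_fine_tunned_flipkart",
    aggregation_strategy="simple",  # assumption; groups sub-word tokens into entities
)
print(ner("show me red running shoes under 2000"))  # invented query
```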
ahadda5/bart_wikikp_ftuned_cve50k
f8bc8ec5d3358d1c621d4d4a089bfe61f9eac0db
2022-07-06T15:18:10.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
ahadda5
null
ahadda5/bart_wikikp_ftuned_cve50k
6
null
transformers
15,822
Bart wikikp, masked on cve50k
Aktsvigun/bart-base_aeslc_6585777
b84a4291d721565c03af9428104891e0b0daa9d8
2022-07-07T15:20:09.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_aeslc_6585777
6
null
transformers
15,823
Entry not found
Aktsvigun/bart-base_aeslc_5893459
2ccb70abdd449959d158ce48d6ccf6b9fb92fffb
2022-07-07T15:27:44.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_aeslc_5893459
6
null
transformers
15,824
Entry not found
Aktsvigun/bart-base_aeslc_7629317
1826189334fd3e7b2efe3755f9618ed493966ec5
2022-07-07T15:38:13.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_aeslc_7629317
6
null
transformers
15,825
Entry not found
Aktsvigun/bart-base_aeslc_6880281
45c4bb46b8d114abcb78451f6dc00b546799de32
2022-07-07T15:25:32.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Aktsvigun
null
Aktsvigun/bart-base_aeslc_6880281
6
null
transformers
15,826
Entry not found
IIC/mt5-large-lfqa-es
fb10f72a2f9c98fd0867a4554e8a7d2fb3f3b844
2022-07-07T11:21:53.000Z
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
IIC
null
IIC/mt5-large-lfqa-es
6
null
transformers
15,827
Entry not found
PronayGhosh18/dummy-model_101_pronay_ghosh
bfba57b8b04ec5d6ce624e9f4c4d721faaa90de0
2022-07-08T07:22:55.000Z
[ "pytorch", "camembert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
PronayGhosh18
null
PronayGhosh18/dummy-model_101_pronay_ghosh
6
null
transformers
15,828
Entry not found
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-Chem-512-5-30
26d490d421340cb0ce7dada784a860f605b01788
2022-07-11T09:11:50.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-scibert_scivocab_cased-BioRED-Chem-512-5-30
6
null
transformers
15,829
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-Chem-512-5-30
77bee4f31a93e509c896819f28eaf2dddada6225
2022-07-08T18:00:50.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-Chem-512-5-30
6
null
transformers
15,830
Entry not found
nvidia/stt_es_conformer_ctc_large
8853e9e151d598ca2724e524db07a722af517c5d
2022-07-13T16:40:22.000Z
[ "nemo", "es", "dataset:Fisher", "dataset:VoxPopuli", "dataset:facebook/multilingual_librispeech", "dataset:mozilla-foundation/common_voice_7_0", "arxiv:2005.08100", "automatic-speech-recognition", "speech", "audio", "CTC", "Conformer", "Transformer", "pytorch", "NeMo", "hf-asr-leaderboard", "Riva", "license:cc-by-4.0", "model-index" ]
automatic-speech-recognition
false
nvidia
null
nvidia/stt_es_conformer_ctc_large
6
1
nemo
15,831
--- language: - es library_name: nemo datasets: - Fisher - VoxPopuli - facebook/multilingual_librispeech - mozilla-foundation/common_voice_7_0 thumbnail: null tags: - automatic-speech-recognition - speech - audio - CTC - Conformer - Transformer - pytorch - NeMo - hf-asr-leaderboard - Riva license: cc-by-4.0 model-index: - name: stt_es_conformer_ctc_large results: - task: type: Automatic Speech Recognition name: speech-recognition dataset: name: common-voice-7-0-6 type: mozilla-foundation/common_voice_7_0 config: es split: dev args: language: es metrics: - name: Dev WER type: wer value: 5.0 - task: type: Automatic Speech Recognition name: speech-recognition dataset: name: common-voice-7-0-6 type: mozilla-foundation/common_voice_7_0 config: es split: test args: language: es metrics: - name: Test WER type: wer value: 5.5 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: Multilingual LibriSpeech type: facebook/multilingual_librispeech config: spanish split: dev args: language: es metrics: - name: Dev WER type: wer value: 3.6 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: Multilingual LibriSpeech type: facebook/multilingual_librispeech config: spanish split: test args: language: es metrics: - name: Test WER type: wer value: 3.6 --- # NVIDIA Conformer-CTC Large (es) <style> img { display: inline; } </style> | [![Model architecture](https://img.shields.io/badge/Model_Arch-Conformer--CTC-lightgrey#model-badge)](#model-architecture) | [![Model size](https://img.shields.io/badge/Params-120M-lightgrey#model-badge)](#model-architecture) | [![Language](https://img.shields.io/badge/Language-es-lightgrey#model-badge)](#datasets) | [![Riva Compatible](https://img.shields.io/badge/NVIDIA%20Riva-compatible-brightgreen#model-badge)](#deployment-with-nvidia-riva) | This model transcribes speech in lowercase Spanish alphabet including spaces, and was trained on a composite dataset comprising of 1340 hours of Spanish speech. It is a non-autoregressive "large" variant of Conformer, with around 120 million parameters. See the [model architecture](#model-architecture) section and [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#conformer-ctc) for complete architecture details. It is also compatible with NVIDIA Riva for [production-grade server deployments](#deployment-with-nvidia-riva). ## Usage The model is available for use in the NeMo toolkit [3], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset. To train, fine-tune or play with the model you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed latest PyTorch version. ``` pip install nemo_toolkit['all'] ``` ### Automatically instantiate the model ```python import nemo.collections.asr as nemo_asr asr_model = nemo_asr.models.EncDecCTCModelBPE.from_pretrained("nvidia/stt_es_conformer_ctc_large") ``` ### Transcribing using Python First, let's get a sample ``` wget https://dldata-public.s3.us-east-2.amazonaws.com/2086-149220-0033.wav ``` Then simply do: ``` asr_model.transcribe(['2086-149220-0033.wav']) ``` ### Transcribing many audio files ```shell python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py pretrained_name="nvidia/stt_es_conformer_ctc_large" audio_dir="<DIRECTORY CONTAINING AUDIO FILES>" ``` ### Input This model accepts 16000 kHz Mono-channel Audio (wav files) as input. 
### Output This model provides transcribed speech as a string for a given audio sample. ## Model Architecture Conformer-CTC model is a non-autoregressive variant of Conformer model [1] for Automatic Speech Recognition which uses CTC loss/decoding instead of Transducer. You may find more info on the detail of this model here: [Conformer-CTC Model](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#conformer-ctc). ## Training The NeMo toolkit [3] was used for training the models for over several hundred epochs. These model are trained with this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_ctc/speech_to_text_ctc_bpe.py) and this [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/conformer/conformer_ctc_bpe.yaml). The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py). The checkpoint of the language model used as the neural rescorer can be found [here](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_es_conformer_ctc_large/files). You may find more info on how to train and use language models for ASR models here: [ASR Language Modeling](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html) ### Datasets All the models in this collection are trained on a composite dataset (NeMo ASRSET) comprising of 1340 hours of Spanish speech: - Mozilla Common Voice 7.0 (Spanish) - 289 hours after data cleaning - Multilingual LibriSpeech (Spanish) - 801 hours after data cleaning - Voxpopuli transcribed subset (Spanish) - 110 hours after data cleaning - Fisher dataset (Spanish) - 140 hours after data cleaning ## Performance The list of the available models in this collection is shown in the following table. Performances of the ASR models are reported in terms of Word Error Rate (WER%) with greedy decoding. | Version | Tokenizer | Vocabulary Size | MCV 7.0 Dev | MCV 7.0 Test | MLS Dev | MLS Test | Voxpopuli Dev | Voxpopuli Test | Fisher Dev | Fisher Test| Train Dataset | |---------|-----------------------|-----------------|-------------|--------------|---------|----------|---------------|----------------|------------|-------------|-----------------| | 1.8.0 | SentencePiece Unigram | 1024 | 6.3 | 6.9 | 4.3 | 4.2 | 6.1 | 7.5 | 18.3 | 18.5 | NeMo ASRSET 2.0 | While deploying with [NVIDIA Riva](https://developer.nvidia.com/riva), you can combine this model with external language models to further improve WER. The WER(%) of the latest model with different language modeling techniques are reported in the following table. 
| Language Modeling | Training Dataset | MCV 7.0 Dev | MCV 7.0 Test | MLS Dev | MLS Test | Voxpopuli Dev | Voxpopuli Test | Fisher Dev | Fisher Test| Comment | |-------------------|------------------------------------------------------------------------------|-------------|--------------|---------|----------|---------------|----------------|----------------|----------------|--------------------------------------------------------| | N-gram LM | Spanish News Crawl corpus (50M sentences) + NeMo ASRSET training transcripts | 5.0 | 5.5 | 3.6 | 3.6 | 5.5 | 6.7 | 17.4 | 17.5 | N=4, beam_width=128, n_gram_alpha=0.8, n_gram_beta=1.5 | ## Limitations Since this model was trained on publicly available speech datasets, the performance of this model might degrade for speech which includes technical terms, or vernacular that the model has not been trained on. The model might also perform worse for accented speech. ## Deployment with NVIDIA Riva For the best real-time accuracy, latency, and throughput, deploy the model with [NVIDIA Riva](https://developer.nvidia.com/riva), an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, at the edge, and embedded. Additionally, Riva provides: * World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours * Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization * Streaming speech recognition, Kubernetes compatible scaling, and Enterprise-grade support Check out [Riva live demo](https://developer.nvidia.com/riva#demos). ## References - [1] [Conformer: Convolution-augmented Transformer for Speech Recognition](https://arxiv.org/abs/2005.08100) - [2] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece) - [3] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)
yam1ke/distilbert-base-uncased-finetuned-ner
9c1be937a4b7a3c2e0dad2b0f4f048a2ed3e9ce4
2022-07-10T00:33:07.000Z
[ "pytorch", "distilbert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
yam1ke
null
yam1ke/distilbert-base-uncased-finetuned-ner
6
null
transformers
15,832
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: distilbert-base-uncased-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9285476533895485 - name: Recall type: recall value: 0.9362344781295447 - name: F1 type: f1 value: 0.9323752228163993 - name: Accuracy type: accuracy value: 0.9838753236850049 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ner This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0607 - Precision: 0.9285 - Recall: 0.9362 - F1: 0.9324 - Accuracy: 0.9839 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2452 | 1.0 | 878 | 0.0709 | 0.9184 | 0.9206 | 0.9195 | 0.9803 | | 0.0501 | 2.0 | 1756 | 0.0621 | 0.9212 | 0.9328 | 0.9270 | 0.9830 | | 0.0299 | 3.0 | 2634 | 0.0607 | 0.9285 | 0.9362 | 0.9324 | 0.9839 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0 - Datasets 2.3.2 - Tokenizers 0.12.1
huangjia/pegasus-samsum
c078b0916f6dfa6a0c634a178f062195e0889978
2022-07-10T10:06:39.000Z
[ "pytorch", "tensorboard", "pegasus", "text2text-generation", "dataset:samsum", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
huangjia
null
huangjia/pegasus-samsum
6
null
transformers
15,833
--- tags: - generated_from_trainer datasets: - samsum model-index: - name: pegasus-samsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-samsum This model is a fine-tuned version of [google/pegasus-cnn_dailymail](https://huggingface.co/google/pegasus-cnn_dailymail) on the samsum dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.2 - Datasets 1.18.4 - Tokenizers 0.10.3
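A minimal summarization sketch for the checkpoint above (pegasus-samsum models are typically used for dialogue summarization; the dialogue below is invented):

```python
from transformers import pipeline

# Hedged usage sketch; not part of the original card.
summarizer = pipeline("summarization", model="huangjia/pegasus-samsum")
dialogue = "Anna: Are we still on for lunch?\nTom: Yes, 12:30 at the usual place."
print(summarizer(dialogue))
```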
ghadeermobasher/Modified-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-Chem-512-5-10
7e03d90f518c76451116e806f07f46349701854c
2022-07-11T10:22:38.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-Chem-512-5-10
6
null
transformers
15,834
Entry not found
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-Chem-512-5-10
79a5aeb6d16466b38ec991852bf841970cfbb828
2022-07-11T10:23:55.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-Chem-512-5-10
6
null
transformers
15,835
Entry not found
paola-md/recipe-roberta-upper-tIs
d849044c59e30e19878e743a5ee3e4f7592fc414
2022-07-12T00:11:13.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
fill-mask
false
paola-md
null
paola-md/recipe-roberta-upper-tIs
6
null
transformers
15,836
--- license: mit tags: - generated_from_trainer model-index: - name: recipe-roberta-upper-tIs results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # recipe-roberta-upper-tIs This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7904 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.2671 | 1.0 | 1281 | 1.0554 | | 1.0995 | 2.0 | 2562 | 0.9832 | | 1.0339 | 3.0 | 3843 | 0.9389 | | 0.9925 | 4.0 | 5124 | 0.9095 | | 0.964 | 5.0 | 6405 | 0.8914 | | 0.9426 | 6.0 | 7686 | 0.8708 | | 0.9227 | 7.0 | 8967 | 0.8590 | | 0.9082 | 8.0 | 10248 | 0.8448 | | 0.8963 | 9.0 | 11529 | 0.8361 | | 0.8847 | 10.0 | 12810 | 0.8249 | | 0.8756 | 11.0 | 14091 | 0.8204 | | 0.8672 | 12.0 | 15372 | 0.8105 | | 0.8612 | 13.0 | 16653 | 0.8106 | | 0.8561 | 14.0 | 17934 | 0.8041 | | 0.8485 | 15.0 | 19215 | 0.7979 | | 0.8452 | 16.0 | 20496 | 0.7910 | | 0.8403 | 17.0 | 21777 | 0.7991 | | 0.8389 | 18.0 | 23058 | 0.7928 | | 0.8371 | 19.0 | 24339 | 0.7926 | | 0.8341 | 20.0 | 25620 | 0.7904 | ### Framework versions - Transformers 4.19.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
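A minimal fill-mask sketch for the checkpoint above (RoBERTa-style checkpoints use `<mask>` as the mask token; the recipe-style prompt is invented):

```python
from transformers import pipeline

# Hedged usage sketch; not part of the original card.
fill = pipeline("fill-mask", model="paola-md/recipe-roberta-upper-tIs")
print(fill("Preheat the <mask> to 180 degrees."))
```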
Evelyn18/legalectra-small-spanish-becasv3-6
526339a729283b20b3f844ba6b75b5d59d779594
2022-07-12T05:05:14.000Z
[ "pytorch", "tensorboard", "electra", "question-answering", "dataset:becasv2", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
question-answering
false
Evelyn18
null
Evelyn18/legalectra-small-spanish-becasv3-6
6
null
transformers
15,837
--- tags: - generated_from_trainer datasets: - becasv2 model-index: - name: legalectra-small-spanish-becasv3-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # legalectra-small-spanish-becasv3-6 This model is a fine-tuned version of [mrm8488/legalectra-small-spanish](https://huggingface.co/mrm8488/legalectra-small-spanish) on the becasv2 dataset. It achieves the following results on the evaluation set: - Loss: 3.8441 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 150 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 5 | 5.6469 | | No log | 2.0 | 10 | 5.5104 | | No log | 3.0 | 15 | 5.4071 | | No log | 4.0 | 20 | 5.3313 | | No log | 5.0 | 25 | 5.2629 | | No log | 6.0 | 30 | 5.1972 | | No log | 7.0 | 35 | 5.1336 | | No log | 8.0 | 40 | 5.0667 | | No log | 9.0 | 45 | 5.0030 | | No log | 10.0 | 50 | 4.9302 | | No log | 11.0 | 55 | 4.8646 | | No log | 12.0 | 60 | 4.7963 | | No log | 13.0 | 65 | 4.7328 | | No log | 14.0 | 70 | 4.6735 | | No log | 15.0 | 75 | 4.6258 | | No log | 16.0 | 80 | 4.5869 | | No log | 17.0 | 85 | 4.5528 | | No log | 18.0 | 90 | 4.5177 | | No log | 19.0 | 95 | 4.4916 | | No log | 20.0 | 100 | 4.4685 | | No log | 21.0 | 105 | 4.4371 | | No log | 22.0 | 110 | 4.4271 | | No log | 23.0 | 115 | 4.3905 | | No log | 24.0 | 120 | 4.3931 | | No log | 25.0 | 125 | 4.3902 | | No log | 26.0 | 130 | 4.3772 | | No log | 27.0 | 135 | 4.3981 | | No log | 28.0 | 140 | 4.4463 | | No log | 29.0 | 145 | 4.4501 | | No log | 30.0 | 150 | 4.4654 | | No log | 31.0 | 155 | 4.4069 | | No log | 32.0 | 160 | 4.4108 | | No log | 33.0 | 165 | 4.4394 | | No log | 34.0 | 170 | 4.4320 | | No log | 35.0 | 175 | 4.3541 | | No log | 36.0 | 180 | 4.4534 | | No log | 37.0 | 185 | 4.2616 | | No log | 38.0 | 190 | 4.2474 | | No log | 39.0 | 195 | 4.4358 | | No log | 40.0 | 200 | 4.3060 | | No log | 41.0 | 205 | 4.1866 | | No log | 42.0 | 210 | 4.2735 | | No log | 43.0 | 215 | 4.2739 | | No log | 44.0 | 220 | 4.1812 | | No log | 45.0 | 225 | 4.2484 | | No log | 46.0 | 230 | 4.3706 | | No log | 47.0 | 235 | 4.3487 | | No log | 48.0 | 240 | 4.2805 | | No log | 49.0 | 245 | 4.3180 | | No log | 50.0 | 250 | 4.3574 | | No log | 51.0 | 255 | 4.2823 | | No log | 52.0 | 260 | 4.0643 | | No log | 53.0 | 265 | 4.0729 | | No log | 54.0 | 270 | 4.2368 | | No log | 55.0 | 275 | 4.2845 | | No log | 56.0 | 280 | 4.1009 | | No log | 57.0 | 285 | 4.0629 | | No log | 58.0 | 290 | 4.1250 | | No log | 59.0 | 295 | 4.2048 | | No log | 60.0 | 300 | 4.2412 | | No log | 61.0 | 305 | 4.1653 | | No log | 62.0 | 310 | 4.1433 | | No log | 63.0 | 315 | 4.1309 | | No log | 64.0 | 320 | 4.1381 | | No log | 65.0 | 325 | 4.2162 | | No log | 66.0 | 330 | 4.1858 | | No log | 67.0 | 335 | 4.1342 | | No log | 68.0 | 340 | 4.1247 | | No log | 69.0 | 345 | 4.1701 | | No log | 70.0 | 350 | 4.1915 | | No log | 71.0 | 355 | 4.1356 | | No log | 72.0 | 360 | 4.1766 | | No log | 73.0 | 365 | 4.1296 | | No log | 74.0 | 370 | 
4.0594 | | No log | 75.0 | 375 | 4.0601 | | No log | 76.0 | 380 | 4.0328 | | No log | 77.0 | 385 | 3.9978 | | No log | 78.0 | 390 | 4.0070 | | No log | 79.0 | 395 | 4.0519 | | No log | 80.0 | 400 | 4.1000 | | No log | 81.0 | 405 | 3.9550 | | No log | 82.0 | 410 | 3.9159 | | No log | 83.0 | 415 | 3.9494 | | No log | 84.0 | 420 | 4.0546 | | No log | 85.0 | 425 | 4.2223 | | No log | 86.0 | 430 | 4.2665 | | No log | 87.0 | 435 | 3.8892 | | No log | 88.0 | 440 | 3.7763 | | No log | 89.0 | 445 | 3.8576 | | No log | 90.0 | 450 | 4.0089 | | No log | 91.0 | 455 | 4.1495 | | No log | 92.0 | 460 | 4.1545 | | No log | 93.0 | 465 | 4.0164 | | No log | 94.0 | 470 | 3.9175 | | No log | 95.0 | 475 | 3.9308 | | No log | 96.0 | 480 | 3.9658 | | No log | 97.0 | 485 | 3.9856 | | No log | 98.0 | 490 | 3.9691 | | No log | 99.0 | 495 | 3.9082 | | 3.2873 | 100.0 | 500 | 3.8736 | | 3.2873 | 101.0 | 505 | 3.8963 | | 3.2873 | 102.0 | 510 | 3.9391 | | 3.2873 | 103.0 | 515 | 3.9408 | | 3.2873 | 104.0 | 520 | 3.9075 | | 3.2873 | 105.0 | 525 | 3.8258 | | 3.2873 | 106.0 | 530 | 3.7917 | | 3.2873 | 107.0 | 535 | 3.7981 | | 3.2873 | 108.0 | 540 | 3.8272 | | 3.2873 | 109.0 | 545 | 3.8655 | | 3.2873 | 110.0 | 550 | 3.8234 | | 3.2873 | 111.0 | 555 | 3.7126 | | 3.2873 | 112.0 | 560 | 3.6981 | | 3.2873 | 113.0 | 565 | 3.7327 | | 3.2873 | 114.0 | 570 | 3.8470 | | 3.2873 | 115.0 | 575 | 4.0036 | | 3.2873 | 116.0 | 580 | 4.0412 | | 3.2873 | 117.0 | 585 | 4.0487 | | 3.2873 | 118.0 | 590 | 4.0524 | | 3.2873 | 119.0 | 595 | 4.0375 | | 3.2873 | 120.0 | 600 | 3.9971 | | 3.2873 | 121.0 | 605 | 3.8959 | | 3.2873 | 122.0 | 610 | 3.8834 | | 3.2873 | 123.0 | 615 | 3.9279 | | 3.2873 | 124.0 | 620 | 3.9374 | | 3.2873 | 125.0 | 625 | 3.9515 | | 3.2873 | 126.0 | 630 | 3.9625 | | 3.2873 | 127.0 | 635 | 3.9635 | | 3.2873 | 128.0 | 640 | 3.9596 | | 3.2873 | 129.0 | 645 | 3.8871 | | 3.2873 | 130.0 | 650 | 3.8307 | | 3.2873 | 131.0 | 655 | 3.8318 | | 3.2873 | 132.0 | 660 | 3.8403 | | 3.2873 | 133.0 | 665 | 3.8560 | | 3.2873 | 134.0 | 670 | 3.8650 | | 3.2873 | 135.0 | 675 | 3.8734 | | 3.2873 | 136.0 | 680 | 3.8756 | | 3.2873 | 137.0 | 685 | 3.8613 | | 3.2873 | 138.0 | 690 | 3.8447 | | 3.2873 | 139.0 | 695 | 3.8362 | | 3.2873 | 140.0 | 700 | 3.8328 | | 3.2873 | 141.0 | 705 | 3.8350 | | 3.2873 | 142.0 | 710 | 3.8377 | | 3.2873 | 143.0 | 715 | 3.8399 | | 3.2873 | 144.0 | 720 | 3.8414 | | 3.2873 | 145.0 | 725 | 3.8422 | | 3.2873 | 146.0 | 730 | 3.8435 | | 3.2873 | 147.0 | 735 | 3.8437 | | 3.2873 | 148.0 | 740 | 3.8437 | | 3.2873 | 149.0 | 745 | 3.8440 | | 3.2873 | 150.0 | 750 | 3.8441 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-Dis-512-5-30
0543e98e4da4893e1e216f0a7a04d830d757f758
2022-07-12T11:33:09.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-BiomedNLP-PubMedBERT-base-uncased-abstract-BioRED-Dis-512-5-30
6
null
transformers
15,838
Entry not found
ghadeermobasher/Original-biobert-v1.1-BioRED_Dis-320-8-10
8059f6895398c98bb586c5e2997cfcb9faa98a29
2022-07-12T14:37:31.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-biobert-v1.1-BioRED_Dis-320-8-10
6
null
transformers
15,839
Entry not found
ghadeermobasher/Modified-biobert-v1.1-BioRED-Dis-320-8-10
8582e854020f91cb9922963dd255fcec34182e0b
2022-07-13T12:57:26.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-biobert-v1.1-BioRED-Dis-320-8-10
6
null
transformers
15,840
ghadeermobasher/Original-scibert_scivocab_cased-BioRED_Dis-320-8-10
cfff8e379cd2412e15a3267f878c51ae223d2213
2022-07-12T14:41:34.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Original-scibert_scivocab_cased-BioRED_Dis-320-8-10
6
null
transformers
15,841
Entry not found
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-Dis-320-8-10
3d716b334d2b09f6d996eaa70ed481fe04c97a81
2022-07-12T14:42:36.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
ghadeermobasher
null
ghadeermobasher/Modified-scibert_scivocab_cased-BioRED-Dis-320-8-10
6
null
transformers
15,842
Entry not found
andreaschandra/xlm-roberta-base-finetuned-panx-en
6d4f27a5ad8b1597cd443502445d0b7374ccebb3
2022-07-12T15:39:20.000Z
[ "pytorch", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
andreaschandra
null
andreaschandra/xlm-roberta-base-finetuned-panx-en
6
null
transformers
15,843
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-en results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.en metrics: - name: F1 type: f1 value: 0.6774373259052925 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-en This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.3932 - F1: 0.6774 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 1.0236 | 1.0 | 50 | 0.5462 | 0.5109 | | 0.5047 | 2.0 | 100 | 0.4387 | 0.6370 | | 0.3716 | 3.0 | 150 | 0.3932 | 0.6774 | ### Framework versions - Transformers 4.19.4 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
zluvolyote/s288cExpressionPrediction_k6
8d3d6fc7417ce119c2b24b76b81da48f084508b0
2022-07-12T16:54:43.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
zluvolyote
null
zluvolyote/s288cExpressionPrediction_k6
6
null
transformers
15,844
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: s288cExpressionPrediction_k6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # s288cExpressionPrediction_k6 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4418 - Accuracy: 0.8067 - F1: 0.7882 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 58 | 0.5315 | 0.7278 | 0.7572 | | No log | 2.0 | 116 | 0.4604 | 0.7853 | 0.7841 | | No log | 3.0 | 174 | 0.4418 | 0.8067 | 0.7882 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
Team-PIXEL/pixel-base-finetuned-pos-ud-chinese-gsd
d64d39625423a61eddf730c45b4fe7c734b7063c
2022-07-13T00:25:47.000Z
[ "pytorch", "pixel", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-pos-ud-chinese-gsd
6
null
transformers
15,845
Entry not found
Team-PIXEL/pixel-base-finetuned-parsing-ud-chinese-gsd
5a37a7cd43599c02e2241085e324f575b49144ef
2022-07-13T01:54:48.000Z
[ "pytorch", "pixel", "transformers" ]
null
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-parsing-ud-chinese-gsd
6
null
transformers
15,846
Entry not found
NimaBoscarino/STPushToHub-test2
30b65d59877dd9c81c416df4187012e766e8485a
2022-07-13T05:57:37.000Z
[ "pytorch", "distilbert", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
NimaBoscarino
null
NimaBoscarino/STPushToHub-test2
6
null
sentence-transformers
15,847
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # NimaBoscarino/STPushToHub-test2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('NimaBoscarino/STPushToHub-test2') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('NimaBoscarino/STPushToHub-test2') model = AutoModel.from_pretrained('NimaBoscarino/STPushToHub-test2') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=NimaBoscarino/STPushToHub-test2) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 360 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 4, "evaluation_steps": 1000, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 144, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Team-PIXEL/pixel-base-finetuned-parsing-ud-coptic-scriptorium
42cdd6f05fedbfb571c6d3c8555be9b04b0f0ddc
2022-07-13T14:36:01.000Z
[ "pytorch", "pixel", "transformers" ]
null
false
Team-PIXEL
null
Team-PIXEL/pixel-base-finetuned-parsing-ud-coptic-scriptorium
6
null
transformers
15,848
Entry not found
Hamzaaa/wav2vec2-base-finetuned-greek
98d486d2a5a9fa8f23a927981d808ac276d5ce6e
2022-07-13T17:43:56.000Z
[ "pytorch", "tensorboard", "wav2vec2", "audio-classification", "transformers" ]
audio-classification
false
Hamzaaa
null
Hamzaaa/wav2vec2-base-finetuned-greek
6
null
transformers
15,849
Entry not found
Hamzaaa/wav2vec2-base-finetuned-Tess-excluded
6cccf2ffbdb719557fefe1e50bc186c1eda1c461
2022-07-13T20:26:32.000Z
[ "pytorch", "tensorboard", "wav2vec2", "audio-classification", "transformers" ]
audio-classification
false
Hamzaaa
null
Hamzaaa/wav2vec2-base-finetuned-Tess-excluded
6
null
transformers
15,850
Entry not found
leokai/distilbert-base-uncased-finetuned-wikiandmark
4293fabc9a2b3763fb77ae7dbbd492ad8c4258bf
2022-07-14T09:51:53.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
leokai
null
leokai/distilbert-base-uncased-finetuned-wikiandmark
6
null
transformers
15,851
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-wikiandmark results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-wikiandmark This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0329 - Accuracy: 0.9962 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0058 | 1.0 | 1490 | 0.0261 | 0.9954 | | 0.0058 | 2.0 | 2980 | 0.0335 | 0.9945 | | 0.0024 | 3.0 | 4470 | 0.0309 | 0.9961 | | 0.0007 | 4.0 | 5960 | 0.0323 | 0.9961 | | 0.0009 | 5.0 | 7450 | 0.0329 | 0.9962 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
liyijing024/swin-base-patch4-window7-224-in22k-Chinese-finetuned
d852360a6933f5d5e8eaba7655923007077ae434
2022-07-14T18:04:48.000Z
[ "pytorch", "tensorboard", "swin", "image-classification", "dataset:imagefolder", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
image-classification
false
liyijing024
null
liyijing024/swin-base-patch4-window7-224-in22k-Chinese-finetuned
6
null
transformers
15,852
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-base-patch4-window7-224-in22k-Chinese-finetuned results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder args: default metrics: - name: Accuracy type: accuracy value: 1.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-base-patch4-window7-224-in22k-Chinese-finetuned This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224-in22k](https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0000 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0121 | 0.99 | 140 | 0.0001 | 1.0 | | 0.0103 | 1.99 | 280 | 0.0001 | 1.0 | | 0.0049 | 2.99 | 420 | 0.0000 | 1.0 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.8.0+cu111 - Datasets 2.3.3.dev0 - Tokenizers 0.12.1
jinwooChoi/KDW_SA_base_32_5e4
c44e34aa58e15368791c015d705f091da39717ed
2022-07-15T08:18:12.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
jinwooChoi
null
jinwooChoi/KDW_SA_base_32_5e4
6
null
transformers
15,853
Entry not found
huggingtweets/thes_standsfor
56746a5eb7e2f7ff3cd50bbc6e5e0165e9fab6c8
2022-07-16T02:33:44.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/thes_standsfor
6
null
transformers
15,854
--- language: en thumbnail: http://www.huggingtweets.com/thes_standsfor/1657938820053/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1544350525558525952/duMyGvoZ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">DIDN’T DISAPPOINT A PICTURE?</div> <div style="text-align: center; font-size: 14px;">@thes_standsfor</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from DIDN’T DISAPPOINT A PICTURE?. | Data | DIDN’T DISAPPOINT A PICTURE? | | --- | --- | | Tweets downloaded | 3234 | | Retweets | 1939 | | Short tweets | 314 | | Tweets kept | 981 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/p6qccgkf/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @thes_standsfor's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1zso9llp) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1zso9llp/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/thes_standsfor') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
CaoHaiNam/vietnamese-address-embedding
9f7316158cf8ffc08886fd7a52533864c206a680
2022-07-15T13:15:49.000Z
[ "pytorch", "bert", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
CaoHaiNam
null
CaoHaiNam/vietnamese-address-embedding
6
null
sentence-transformers
15,855
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # CaoHaiNam/vietnamese-address-embedding This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('CaoHaiNam/vietnamese-address-embedding') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('CaoHaiNam/vietnamese-address-embedding') model = AutoModel.from_pretrained('CaoHaiNam/vietnamese-address-embedding') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=CaoHaiNam/vietnamese-address-embedding) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 8626 with parameters: ``` {'batch_size': 4, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 10, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 100, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 64, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
nloc2578/3
e461de2acca8e77b49d3e4810e21ef5f360065b3
2022-07-16T05:36:32.000Z
[ "pytorch", "tensorboard", "pegasus", "text2text-generation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
nloc2578
null
nloc2578/3
6
null
transformers
15,856
--- tags: - generated_from_trainer model-index: - name: '3' results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 3 This model is a fine-tuned version of [google/pegasus-xsum](https://huggingface.co/google/pegasus-xsum) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: nan ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0015 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 150 - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.9957 | 0.3 | 1000 | 1.8064 | | 1.9022 | 0.6 | 2000 | 1.7976 | | 1.937 | 0.9 | 3000 | 1.7962 | | 1.7922 | 1.2 | 4000 | 1.7951 | | 1.6093 | 1.5 | 5000 | 1.7943 | | 1.6786 | 1.8 | 6000 | 1.7938 | | 1.6979 | 2.1 | 7000 | nan | | 0.0 | 2.4 | 8000 | nan | | 0.0 | 2.7 | 9000 | nan | | 0.0 | 2.99 | 10000 | nan | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Tokenizers 0.12.1
mipatov/t5_test_no_spaces
85a59f1ec9263f045cd7dc98369d198ec493c331
2022-07-16T08:33:48.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
mipatov
null
mipatov/t5_test_no_spaces
6
null
transformers
15,857
Entry not found
abdulmatinomotoso/testing_headline_generator_2
55fe2426dbe3fe2f452bd55d505522c722141e0c
2022-07-17T11:34:28.000Z
[ "pytorch", "tensorboard", "pegasus", "text2text-generation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
abdulmatinomotoso
null
abdulmatinomotoso/testing_headline_generator_2
6
null
transformers
15,858
--- tags: - generated_from_trainer model-index: - name: testing_headline_generator_2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # testing_headline_generator_2 This model is a fine-tuned version of [google/pegasus-multi_news](https://huggingface.co/google/pegasus-multi_news) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 7.5747 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 7.7589 | 0.73 | 100 | 7.5747 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
jinwooChoi/hjw_base
afdde3e8e884afe3179eed944ae0664918c85bbb
2022-07-18T02:04:05.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
jinwooChoi
null
jinwooChoi/hjw_base
6
null
transformers
15,859
Entry not found
jinwooChoi/KDW_SA_mix_64_1e4
9337a65ddba4c16232242d675f5e8e7435c1b244
2022-07-18T02:19:08.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
jinwooChoi
null
jinwooChoi/KDW_SA_mix_64_1e4
6
null
transformers
15,860
Entry not found
jinwooChoi/KDW_SA_mix_48_1e5
bcb275d87a1b22b3fa9606c1af149ba0449f1ff6
2022-07-18T02:59:41.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
jinwooChoi
null
jinwooChoi/KDW_SA_mix_48_1e5
6
null
transformers
15,861
Entry not found
jinwooChoi/KDW_SA_small_mix_16_1e5
ad5d8f5733cfc89c808522378b6fd5a9b541857f
2022-07-18T08:09:23.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
jinwooChoi
null
jinwooChoi/KDW_SA_small_mix_16_1e5
6
null
transformers
15,862
Entry not found
jordyvl/bert-base-portuguese-cased_harem-selective-CRF-first-ner
4286fb94aa8b683d2e0ba03175dacb6a55f24f9a
2022-07-19T09:06:13.000Z
[ "pytorch", "tensorboard", "bert", "dataset:harem", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
null
false
jordyvl
null
jordyvl/bert-base-portuguese-cased_harem-selective-CRF-first-ner
6
null
transformers
15,863
--- license: mit tags: - generated_from_trainer datasets: - harem metrics: - precision - recall - f1 - accuracy model-index: - name: bert-base-portuguese-cased_harem-selective-CRF-first-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-portuguese-cased_harem-selective-CRF-first-ner This model is a fine-tuned version of [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased) on the harem dataset. It achieves the following results on the evaluation set: - Loss: 0.2045 - Precision: 0.5352 - Recall: 0.4351 - F1: 0.48 - Accuracy: 0.9484 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.102 | 1.0 | 2517 | 0.2498 | 0.4367 | 0.3817 | 0.4073 | 0.9332 | | 0.0614 | 2.0 | 5034 | 0.1842 | 0.4756 | 0.4084 | 0.4394 | 0.9408 | | 0.0455 | 3.0 | 7551 | 0.2045 | 0.5352 | 0.4351 | 0.48 | 0.9484 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.2.2 - Tokenizers 0.12.1
jinwooChoi/SKKU_AP_SA_KES
736fb2cf150f0191bcb89ec74f0532d1fe09499c
2022-07-19T02:35:53.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
jinwooChoi
null
jinwooChoi/SKKU_AP_SA_KES
6
null
transformers
15,864
Entry not found
abdulmatinomotoso/testing_headline_generator_3
0ad74ab69fa6cef5a177d48735d7590400e710b8
2022-07-19T09:38:13.000Z
[ "pytorch", "tensorboard", "pegasus", "text2text-generation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
abdulmatinomotoso
null
abdulmatinomotoso/testing_headline_generator_3
6
null
transformers
15,865
--- tags: - generated_from_trainer model-index: - name: testing_headline_generator_3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # testing_headline_generator_3 This model is a fine-tuned version of [google/pegasus-multi_news](https://huggingface.co/google/pegasus-multi_news) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 5.2949 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 7.781 | 0.4 | 100 | 7.5730 | | 5.7967 | 0.8 | 200 | 5.2949 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
google/ddpm-ema-church-256
b653e4de0ed723fd28006939480042a42edd28e2
2022-07-21T15:00:20.000Z
[ "diffusers", "arxiv:2006.11239", "pytorch", "unconditional-image-generation", "license:apache-2.0" ]
unconditional-image-generation
false
google
null
google/ddpm-ema-church-256
6
null
diffusers
15,866
--- license: apache-2.0 tags: - pytorch - diffusers - unconditional-image-generation --- # Denoising Diffusion Probabilistic Models (DDPM) **Paper**: [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) **Authors**: Jonathan Ho, Ajay Jain, Pieter Abbeel **Abstract**: *We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN.* ## Inference **DDPM** models can use *discrete noise schedulers* such as: - [scheduling_ddpm](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddpm.py) - [scheduling_ddim](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_ddim.py) - [scheduling_pndm](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_pndm.py) for inference. Note that while the *ddpm* scheduler yields the highest quality, it also takes the longest. For a good trade-off between quality and inference speed you might want to consider the *ddim* or *pndm* schedulers instead. See the following code: ```python # !pip install diffusers from diffusers import DDPMPipeline, DDIMPipeline, PNDMPipeline model_id = "google/ddpm-ema-church-256" # load model and scheduler ddpm = DDPMPipeline.from_pretrained(model_id) # you can replace DDPMPipeline with DDIMPipeline or PNDMPipeline for faster inference # run pipeline in inference (sample random noise and denoise) image = ddpm()["sample"] # save image image[0].save("ddpm_generated_image.png") ``` For more in-detail information, please have a look at the [official inference example](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) ## Training If you want to train your own model, please have a look at the [official training example](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) ## Samples 1. ![sample_1](https://huggingface.co/google/ddpm-ema-church-256/resolve/main/images/generated_image_0.png) 2. ![sample_2](https://huggingface.co/google/ddpm-ema-church-256/resolve/main/images/generated_image_1.png) 3. ![sample_3](https://huggingface.co/google/ddpm-ema-church-256/resolve/main/images/generated_image_2.png) 4. ![sample_4](https://huggingface.co/google/ddpm-ema-church-256/resolve/main/images/generated_image_3.png)
jinwooChoi/KDW_SA_base_mix_64_1e4
18d16ccf7c3189d1c997773a63012219563343e0
2022-07-20T05:21:33.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
jinwooChoi
null
jinwooChoi/KDW_SA_base_mix_64_1e4
6
null
transformers
15,867
Entry not found
nloc2578/3.5
4496abc8d4e88a7983da55dccc1088091669d924
2022-07-20T11:32:24.000Z
[ "pytorch", "tensorboard", "pegasus", "text2text-generation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
nloc2578
null
nloc2578/3.5
6
null
transformers
15,868
--- tags: - generated_from_trainer model-index: - name: '3.5' results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 3.5 This model is a fine-tuned version of [google/pegasus-xsum](https://huggingface.co/google/pegasus-xsum) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4461 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 150 - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.854 | 0.11 | 1000 | 1.6732 | | 1.736 | 0.22 | 2000 | 1.5991 | | 1.6452 | 0.33 | 3000 | 1.5589 | | 1.6176 | 0.45 | 4000 | 1.5310 | | 1.6151 | 0.56 | 5000 | 1.5173 | | 1.5707 | 0.67 | 6000 | 1.4982 | | 1.5557 | 0.78 | 7000 | 1.4946 | | 1.5307 | 0.89 | 8000 | 1.4748 | | 1.5393 | 1.0 | 9000 | 1.4635 | | 1.3077 | 1.11 | 10000 | 1.4662 | | 1.3419 | 1.22 | 11000 | 1.4705 | | 1.3245 | 1.34 | 12000 | 1.4653 | | 1.3584 | 1.45 | 13000 | 1.4448 | | 1.3403 | 1.56 | 14000 | 1.4452 | | 1.2745 | 1.67 | 15000 | 1.4353 | | 1.2979 | 1.78 | 16000 | 1.4333 | | 1.3084 | 1.89 | 17000 | 1.4284 | | 1.3009 | 2.0 | 18000 | 1.4286 | | 1.1523 | 2.11 | 19000 | 1.4609 | | 1.1352 | 2.23 | 20000 | 1.4565 | | 1.1484 | 2.34 | 21000 | 1.4588 | | 1.1482 | 2.45 | 22000 | 1.4548 | | 1.1355 | 2.56 | 23000 | 1.4535 | | 1.1429 | 2.67 | 24000 | 1.4485 | | 1.1328 | 2.78 | 25000 | 1.4499 | | 1.1487 | 2.89 | 26000 | 1.4461 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Tokenizers 0.12.1
juliensimon/distilbert-amazon-shoe-reviews-tensorboard
7951d664f44c7131ca71cad1852a560065269cb5
2022-07-20T09:22:34.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
juliensimon
null
juliensimon/distilbert-amazon-shoe-reviews-tensorboard
6
null
transformers
15,869
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: distilbert-amazon-shoe-reviews-tensorboard results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-amazon-shoe-reviews-tensorboard This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.9534 - Accuracy: 0.5779 - F1: [0.63189419 0.46645049 0.50381304 0.55843496 0.73060507] - Precision: [0.62953754 0.47008547 0.48669202 0.58801498 0.71780957] - Recall: [0.63426854 0.46287129 0.52218256 0.53168844 0.74386503] ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------------------------------------------------------:|:--------------------------------------------------------:|:--------------------------------------------------------:| | 0.8776 | 1.0 | 2813 | 0.9534 | 0.5779 | [0.63189419 0.46645049 0.50381304 0.55843496 0.73060507] | [0.62953754 0.47008547 0.48669202 0.58801498 0.71780957] | [0.63426854 0.46287129 0.52218256 0.53168844 0.74386503] | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
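A minimal usage sketch (not part of the original card): the card above documents training and metrics but no inference snippet. It assumes the checkpoint works with the standard transformers text-classification pipeline; the star-rating label mapping is not documented in the card, so the printed labels may be generic `LABEL_i` names.

```python
# Hedged sketch: assumes the standard text-classification pipeline; the label names
# (e.g. LABEL_0..LABEL_4) come from the checkpoint config and are not verified here.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="juliensimon/distilbert-amazon-shoe-reviews-tensorboard",
)

print(classifier("These shoes fell apart after two weeks."))
```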
koanlp/bart-large-cnn-finetuned-wiki
3a9e5ff5f5f0a0ba125cb22751ea84eb5602cc1d
2022-07-21T04:03:42.000Z
[ "pytorch", "bart", "text2text-generation", "dataset:wiki_lingua", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
koanlp
null
koanlp/bart-large-cnn-finetuned-wiki
6
null
transformers
15,870
--- license: mit tags: - generated_from_trainer datasets: - wiki_lingua model-index: - name: bart-large-cnn-finetuned-wiki results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-cnn-finetuned-wiki This model is a fine-tuned version of [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on the wiki_lingua dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 - label_smoothing_factor: 0.1 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
okho0653/Bio_ClinicalBERT-zero-shot-finetuned-50cad
834dbf7bdce8dcf465078473881a0d4c34475417
2022-07-22T05:42:33.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
okho0653
null
okho0653/Bio_ClinicalBERT-zero-shot-finetuned-50cad
6
null
transformers
15,871
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: Bio_ClinicalBERT-zero-shot-finetuned-50cad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Bio_ClinicalBERT-zero-shot-finetuned-50cad This model is a fine-tuned version of [emilyalsentzer/Bio_ClinicalBERT](https://huggingface.co/emilyalsentzer/Bio_ClinicalBERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.1475 - Accuracy: 0.5 - F1: 0.6667 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
okho0653/Bio_ClinicalBERT-zero-shot-finetuned-50noncad
58d720434eab2fb250d896582032e3c15dc17cd3
2022-07-22T05:55:47.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
okho0653
null
okho0653/Bio_ClinicalBERT-zero-shot-finetuned-50noncad
6
null
transformers
15,872
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: Bio_ClinicalBERT-zero-shot-finetuned-50noncad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Bio_ClinicalBERT-zero-shot-finetuned-50noncad This model is a fine-tuned version of [emilyalsentzer/Bio_ClinicalBERT](https://huggingface.co/emilyalsentzer/Bio_ClinicalBERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.8046 - Accuracy: 0.5 - F1: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
jinwooChoi/SKKU_SA_HJW_0722_0
e7b62d24b85eea167907d12ea7f675bd5233511a
2022-07-22T07:46:17.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
jinwooChoi
null
jinwooChoi/SKKU_SA_HJW_0722_0
6
null
transformers
15,873
Entry not found
ronanki/all-mpnet-base-v2-2022-07-18_15-29-33
cd428edb20616d43b0beabb7c2742154bcf565f8
2022-07-22T11:11:58.000Z
[ "pytorch", "mpnet", "feature-extraction", "sentence-transformers", "sentence-similarity" ]
sentence-similarity
false
ronanki
null
ronanki/all-mpnet-base-v2-2022-07-18_15-29-33
6
null
sentence-transformers
15,874
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # ronanki/all-mpnet-base-v2-2022-07-18_15-29-33 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('ronanki/all-mpnet-base-v2-2022-07-18_15-29-33') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=ronanki/all-mpnet-base-v2-2022-07-18_15-29-33) ## Training The model was trained with the parameters: **DataLoader**: `sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 22 with parameters: ``` {'batch_size': 64} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 10, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 22, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Normalize() ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
shiulian/t5-end2end-questions-generation
1f6a6a25327cd8eaa73b95563be49d24f8e6e065
2022-07-23T14:19:26.000Z
[ "pytorch", "t5", "text2text-generation", "dataset:squad_modified_for_t5_qg", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
shiulian
null
shiulian/t5-end2end-questions-generation
6
null
transformers
15,875
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad_modified_for_t5_qg model-index: - name: t5-end2end-questions-generation results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-end2end-questions-generation This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the squad_modified_for_t5_qg dataset. It achieves the following results on the evaluation set: - Loss: 1.5679 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 7 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.5866 | 0.34 | 100 | 1.9116 | | 1.9674 | 0.68 | 200 | 1.7280 | | 1.8487 | 1.02 | 300 | 1.6650 | | 1.7429 | 1.36 | 400 | 1.6400 | | 1.7148 | 1.69 | 500 | 1.6214 | | 1.695 | 2.03 | 600 | 1.6076 | | 1.6321 | 2.37 | 700 | 1.5979 | | 1.6276 | 2.71 | 800 | 1.5910 | | 1.6171 | 3.05 | 900 | 1.5875 | | 1.5712 | 3.39 | 1000 | 1.5898 | | 1.5702 | 3.73 | 1100 | 1.5749 | | 1.5594 | 4.07 | 1200 | 1.5798 | | 1.5352 | 4.41 | 1300 | 1.5733 | | 1.5228 | 4.75 | 1400 | 1.5733 | | 1.524 | 5.08 | 1500 | 1.5727 | | 1.4954 | 5.42 | 1600 | 1.5699 | | 1.4866 | 5.76 | 1700 | 1.5696 | | 1.5089 | 6.1 | 1800 | 1.5696 | | 1.4771 | 6.44 | 1900 | 1.5736 | | 1.4772 | 6.78 | 2000 | 1.5679 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
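A usage sketch under explicit assumptions (not part of the original card): the standard text2text-generation pipeline is used, and the `generate questions:` prompt prefix is a guess based on common T5 question-generation setups, since the card does not document the expected input format.

```python
# Hedged sketch: the "generate questions:" prefix and max_length are assumptions;
# the card does not specify how inputs were formatted during fine-tuning.
from transformers import pipeline

qg = pipeline("text2text-generation", model="shiulian/t5-end2end-questions-generation")

context = "The Amazon rainforest covers much of the Amazon basin of South America."
print(qg("generate questions: " + context, max_length=64))
```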
Siyong/MT_LM
16f547917ca7534ef8b3d37a968c5294822219e5
2022-07-23T17:03:19.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Siyong
null
Siyong/MT_LM
6
null
transformers
15,876
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec-base-Millad_TIMIT results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec-base-Millad_TIMIT This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.3772 - Wer: 0.6859 - Cer: 0.3217 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 5000 - num_epochs: 60 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | Cer | |:-------------:|:-----:|:-----:|:---------------:|:------:|:------:| | No log | 2.36 | 2000 | 2.6233 | 1.0130 | 0.6241 | | No log | 4.73 | 4000 | 2.2206 | 0.9535 | 0.5032 | | No log | 7.09 | 6000 | 2.3036 | 0.9368 | 0.5063 | | 1.235 | 9.46 | 8000 | 1.9932 | 0.9275 | 0.5032 | | 1.235 | 11.82 | 10000 | 2.0207 | 0.8922 | 0.4498 | | 1.235 | 14.18 | 12000 | 1.6171 | 0.7993 | 0.3976 | | 1.235 | 16.55 | 14000 | 1.6729 | 0.8309 | 0.4209 | | 0.2779 | 18.91 | 16000 | 1.7043 | 0.8141 | 0.4340 | | 0.2779 | 21.28 | 18000 | 1.7426 | 0.7658 | 0.3960 | | 0.2779 | 23.64 | 20000 | 1.5230 | 0.7361 | 0.3830 | | 0.2779 | 26.0 | 22000 | 1.4286 | 0.7658 | 0.3794 | | 0.1929 | 28.37 | 24000 | 1.4450 | 0.7379 | 0.3644 | | 0.1929 | 30.73 | 26000 | 1.5922 | 0.7491 | 0.3826 | | 0.1929 | 33.1 | 28000 | 1.4443 | 0.7454 | 0.3617 | | 0.1929 | 35.46 | 30000 | 1.5450 | 0.7268 | 0.3621 | | 0.1394 | 37.83 | 32000 | 1.9268 | 0.7491 | 0.3763 | | 0.1394 | 40.19 | 34000 | 1.7094 | 0.7342 | 0.3783 | | 0.1394 | 42.55 | 36000 | 1.4024 | 0.7082 | 0.3494 | | 0.1394 | 44.92 | 38000 | 1.4467 | 0.6840 | 0.3395 | | 0.104 | 47.28 | 40000 | 1.4145 | 0.6933 | 0.3407 | | 0.104 | 49.65 | 42000 | 1.3901 | 0.6970 | 0.3403 | | 0.104 | 52.01 | 44000 | 1.3589 | 0.6636 | 0.3348 | | 0.104 | 54.37 | 46000 | 1.3716 | 0.6952 | 0.3340 | | 0.0781 | 56.74 | 48000 | 1.4025 | 0.6896 | 0.3312 | | 0.0781 | 59.1 | 50000 | 1.3772 | 0.6859 | 0.3217 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.12.0+cu113 - Datasets 1.18.3 - Tokenizers 0.12.1
jcashmoney123/autotrain-amz-1171143428
d15e662ff875dbba21317d86c0b2dab3ded04491
2022-07-23T18:31:20.000Z
[ "pytorch", "bart", "text2text-generation", "unk", "dataset:jcashmoney123/autotrain-data-amz", "transformers", "autotrain", "co2_eq_emissions", "autotrain_compatible" ]
text2text-generation
false
jcashmoney123
null
jcashmoney123/autotrain-amz-1171143428
6
null
transformers
15,877
--- tags: autotrain language: unk widget: - text: "I love AutoTrain 🤗" datasets: - jcashmoney123/autotrain-data-amz co2_eq_emissions: 5.4331208624177245 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 1171143428 - CO2 Emissions (in grams): 5.4331208624177245 ## Validation Metrics - Loss: 2.5859596729278564 - Rouge1: 19.3601 - Rouge2: 4.6055 - RougeL: 17.4309 - RougeLsum: 17.4621 - Gen Len: 15.2938 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/jcashmoney123/autotrain-amz-1171143428 ```
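Besides the cURL call shown in the card, the checkpoint can presumably also be loaded locally; a sketch assuming the standard transformers summarization pipeline (the generation lengths are illustrative, not taken from the card).

```python
# Hedged sketch: assumes the AutoTrain BART checkpoint loads with the standard
# summarization pipeline; max_length/min_length values are illustrative only.
from transformers import pipeline

summarizer = pipeline("summarization", model="jcashmoney123/autotrain-amz-1171143428")

review = "I love AutoTrain. It trained a summarization model for me without any manual tuning."
print(summarizer(review, max_length=32, min_length=5))
```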
erikanesse/great-books-bot-2
693cd4ff288f079c43dc6b51a9df3388a0fa44fd
2022-07-30T00:59:39.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "model-index" ]
text-generation
false
erikanesse
null
erikanesse/great-books-bot-2
6
null
transformers
15,878
--- tags: - generated_from_trainer model-index: - name: great-books-bot-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # great-books-bot-2 This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. It achieves the following results on the evaluation set: - eval_loss: 5.6204 - eval_runtime: 12.3909 - eval_samples_per_second: 0.484 - eval_steps_per_second: 0.081 - epoch: 0.06 - step: 20 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 3 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 100 ### Framework versions - Transformers 4.21.0 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
Migga/ViT-BERT-Chess-V2
3192e80982517c93bbbae18f04aa1a455651b3b3
2022-07-25T07:28:02.000Z
[ "pytorch", "vision-encoder-decoder", "transformers", "generated_from_trainer", "model-index" ]
null
false
Migga
null
Migga/ViT-BERT-Chess-V2
6
null
transformers
15,879
--- tags: - generated_from_trainer model-index: - name: ViT-BERT-Chess-V2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ViT-BERT-Chess-V2 This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.7128 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 10 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 4.0385 | 1.0 | 2770 | 3.9132 | | 3.7453 | 2.0 | 5540 | 3.7552 | | 3.6513 | 3.0 | 8310 | 3.7128 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
bongsoo/mdistilbertV1
0a6df2085032a9b6d658e7c55a798fe5b5558495
2022-07-26T06:15:55.000Z
[ "pytorch", "distilbert", "fill-mask", "transformers", "en", "ko", "autotrain_compatible" ]
fill-mask
false
bongsoo
null
bongsoo/mdistilbertV1
6
null
transformers
15,880
--- pipeline_tag: fill-mask tags: - fill-mask - transformers - en - ko --- A model built by adding a Korean vocabulary to distil-base-multilingual-cased and further pretraining it on Korean using a cleaned kowiki20220620 corpus.
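A minimal usage sketch (not part of the original card): it assumes the checkpoint loads with the standard transformers fill-mask pipeline and that the mask token is `[MASK]`; the example sentence is illustrative only.

```python
# Hedged sketch: assumes the standard fill-mask pipeline works for this checkpoint;
# the example sentence and the [MASK] token string are illustrative, not from the card.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bongsoo/mdistilbertV1")

# Top predictions for the masked Korean token
print(fill_mask("서울은 한국의 [MASK]이다."))
```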
MikkelGroenning/distilbert-base-uncased-finetuned-emotion
59458c7ae43df5fcecae095966a9f0ba3deddb4f
2022-07-25T07:55:37.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
MikkelGroenning
null
MikkelGroenning/distilbert-base-uncased-finetuned-emotion
6
null
transformers
15,881
Entry not found
philschmid/distilbert-imdb-habana-remote-runner
8ad757ff4a389318d04cfa5a27ac9cafe6cba9cd
2022-07-25T08:22:00.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers" ]
text-classification
false
philschmid
null
philschmid/distilbert-imdb-habana-remote-runner
6
null
transformers
15,882
Entry not found
singhajeet13/autotrain-summarization-test-1177043812
d0f23884a4f24e8e3b9406c82a1468cda9adedca
2022-07-26T02:15:55.000Z
[ "pytorch", "bart", "text2text-generation", "en", "dataset:singhajeet13/autotrain-data-summarization-test", "transformers", "autotrain", "co2_eq_emissions", "autotrain_compatible" ]
text2text-generation
false
singhajeet13
null
singhajeet13/autotrain-summarization-test-1177043812
6
null
transformers
15,883
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - singhajeet13/autotrain-data-summarization-test co2_eq_emissions: 1166.308824861558 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 1177043812 - CO2 Emissions (in grams): 1166.308824861558 ## Validation Metrics - Loss: 1.6226013898849487 - Rouge1: 39.5734 - Rouge2: 18.9817 - RougeL: 33.257 - RougeLsum: 33.2571 - Gen Len: 19.84 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/singhajeet13/autotrain-summarization-test-1177043812 ```
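Besides the cURL call in the card, the sketch below loads the BART-based summarizer locally with `transformers`; the input text reuses the card's widget example and the generation length is chosen to roughly match the reported Gen Len.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo = "singhajeet13/autotrain-summarization-test-1177043812"  # repo id from the record above
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

text = "I love AutoTrain"  # widget example from the card
inputs = tokenizer(text, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=24)  # Gen Len above averages ~20 tokens
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```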
huggingtweets/csjonas1mical-gunkbrain1-moeterpussy
90a40887db9b6cf73bf47ed05c6469e3fc12cd14
2022-07-26T04:21:26.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/csjonas1mical-gunkbrain1-moeterpussy
6
null
transformers
15,884
--- language: en thumbnail: http://www.huggingtweets.com/csjonas1mical-gunkbrain1-moeterpussy/1658809281049/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1525202290088595457/GfbtEnPO_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1525207243133689857/h9zu4iMK_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1525206197619642370/HPsBR4xY_400x400.jpg&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">freddy macintosh & Moe Ner & tobash tendril</div> <div style="text-align: center; font-size: 14px;">@csjonas1mical-gunkbrain1-moeterpussy</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from freddy macintosh & Moe Ner & tobash tendril. | Data | freddy macintosh | Moe Ner | tobash tendril | | --- | --- | --- | --- | | Tweets downloaded | 126 | 266 | 165 | | Retweets | 9 | 25 | 12 | | Short tweets | 15 | 23 | 19 | | Tweets kept | 102 | 218 | 134 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2eeslx7w/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @csjonas1mical-gunkbrain1-moeterpussy's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/184slvzr) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/184slvzr/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/csjonas1mical-gunkbrain1-moeterpussy') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. 
## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
PGT/graphnystromformer-artificial-balanced-max500-210000-0
f41baa129366a4a0600eed7d9aa4561ea5957588
2022-07-25T20:10:00.000Z
[ "pytorch", "graph_nystromformer", "text-classification", "transformers" ]
text-classification
false
PGT
null
PGT/graphnystromformer-artificial-balanced-max500-210000-0
6
null
transformers
15,885
Entry not found
mshoaibsarwar/finetuning-sentiment-model-samples
b3d39392ba9b540355900b4c65416b7839947706
2022-07-25T21:54:57.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:imdb", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
mshoaibsarwar
null
mshoaibsarwar/finetuning-sentiment-model-samples
6
null
transformers
15,886
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: finetuning-sentiment-model-samples results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
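The card gives hyperparameters but no metrics or usage; a minimal sketch of the text-classification pipeline over this IMDB fine-tune follows. The label names returned depend on the uploaded config and are not stated in the card.

```python
from transformers import pipeline

# Repo id from the record above; fine-tuned on IMDB according to the card
classifier = pipeline("text-classification", model="mshoaibsarwar/finetuning-sentiment-model-samples")

print(classifier("This movie was a complete waste of time."))
print(classifier("One of the best films I have seen this year."))
```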
BirdL/SimulacraPromptGPT
32206c09253390442d472498d9ae18eb1b753d7f
2022-07-26T00:33:19.000Z
[ "pytorch", "gpt_neo", "text-generation", "transformers", "license:apache-2.0" ]
text-generation
false
BirdL
null
BirdL/SimulacraPromptGPT
6
null
transformers
15,887
--- license: apache-2.0 ---
ultra-coder54732/roberta-base-twitter-prop-16-train-set
f035bd9dc3048f70a6731b4052c88d590865e214
2022-07-26T02:01:21.000Z
[ "pytorch", "tensorboard", "roberta", "text-classification", "transformers" ]
text-classification
false
ultra-coder54732
null
ultra-coder54732/roberta-base-twitter-prop-16-train-set
6
null
transformers
15,888
Entry not found
bongsoo/mdistilbertV1.1
5129e83cc11189921475cbd6c6104ae72006df85
2022-07-26T06:16:33.000Z
[ "pytorch", "distilbert", "fill-mask", "transformers", "en", "ko", "autotrain_compatible" ]
fill-mask
false
bongsoo
null
bongsoo/mdistilbertV1.1
6
null
transformers
15,889
--- pipeline_tag: fill-mask tags: - fill-mask - transformers - en - ko --- A model built from distil-base-multilingual-cased by adding a Korean vocabulary and continuing pretraining on Korean with the cleaned kowiki20220620 corpus.
WENGSYX/Dagnosis_Chinese_BERT
33ade7cec05955caeb4a0ab7d7fc57906958d467
2022-07-26T09:40:08.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
false
WENGSYX
null
WENGSYX/Dagnosis_Chinese_BERT
6
null
transformers
15,890
--- license: mit ---
ejin/bert-base-cased-finetuned-ner
4eb06cfdf2a9dfa715e1585de5721bd47942c0fb
2022-07-27T21:16:41.000Z
[ "pytorch", "tensorboard", "bert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
ejin
null
ejin/bert-base-cased-finetuned-ner
6
null
transformers
15,891
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-base-cased-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 config: conll2003 split: train args: conll2003 metrics: - name: Precision type: precision value: 0.8940432730834298 - name: Recall type: recall value: 0.9008612955320294 - name: F1 type: f1 value: 0.8974393350315055 - name: Accuracy type: accuracy value: 0.9749955848590098 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-cased-finetuned-ner This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0919 - Precision: 0.8940 - Recall: 0.9009 - F1: 0.8974 - Accuracy: 0.9750 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.1147 | 1.0 | 1756 | 0.0919 | 0.8940 | 0.9009 | 0.8974 | 0.9750 | ### Framework versions - Transformers 4.21.0 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
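A usage sketch for the CoNLL-2003 NER fine-tune; the aggregation strategy below is an illustrative choice that merges word pieces into entity spans.

```python
from transformers import pipeline

# Token-classification pipeline; "simple" aggregation groups sub-word tokens into entities
ner = pipeline(
    "token-classification",
    model="ejin/bert-base-cased-finetuned-ner",
    aggregation_strategy="simple",
)

for entity in ner("Hugging Face is based in New York City."):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```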
huggingtweets/khorax
c52afc72a9bfe692293da8861bddaaedd13e997e
2022-07-26T21:15:41.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/khorax
6
null
transformers
15,892
--- language: en thumbnail: http://www.huggingtweets.com/khorax/1658870136126/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1544440184653156353/O0KtLUg__400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Khorax "Kho" Lugnut</div> <div style="text-align: center; font-size: 14px;">@khorax</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Khorax "Kho" Lugnut. | Data | Khorax "Kho" Lugnut | | --- | --- | | Tweets downloaded | 3247 | | Retweets | 352 | | Short tweets | 363 | | Tweets kept | 2532 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/32yjy9s3/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @khorax's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1ws4j0jn) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1ws4j0jn/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/khorax') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
ultra-coder54732/robertabaseproper-prop-16-train-set
7e23b9e390c5f08de09db7ca125d4b040e847677
2022-07-27T00:19:39.000Z
[ "pytorch", "tensorboard", "roberta", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
ultra-coder54732
null
ultra-coder54732/robertabaseproper-prop-16-train-set
6
null
transformers
15,893
--- license: mit tags: - generated_from_trainer model-index: - name: robertabaseproper-prop-16-train-set results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robertabaseproper-prop-16-train-set This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
dminiotas05/distilbert-base-uncased-finetuned-ft780_class
9d9ac4906b577cba872d1d28b8c8bb561cf06cdf
2022-07-27T12:16:52.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
dminiotas05
null
dminiotas05/distilbert-base-uncased-finetuned-ft780_class
6
null
transformers
15,894
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-ft780_class results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ft780_class This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.9843 - Accuracy: 0.2047 - F1: 0.1823 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 2.1065 | 1.0 | 188 | 2.0425 | 0.1747 | 0.1248 | | 1.9642 | 2.0 | 376 | 1.9959 | 0.1987 | 0.1701 | | 1.9019 | 3.0 | 564 | 1.9843 | 0.2047 | 0.1823 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
wgarstka/knotted_proteins_demo_model
ea2c6d330e1e18ed9bdaf3ef77147644524562a7
2022-07-28T09:06:52.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
wgarstka
null
wgarstka/knotted_proteins_demo_model
6
null
transformers
15,895
Entry not found
zhenglianchi/NER-model
fdcbea9c4dd540f0b6f9020f3e8b1acf9b252859
2022-07-28T09:20:45.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
zhenglianchi
null
zhenglianchi/NER-model
6
null
transformers
15,896
Entry not found
okite97/roberta-base-news3
94d4924d3093200a5187f239a5893f7a99832d44
2022-07-28T15:40:06.000Z
[ "pytorch", "tensorboard", "roberta", "text-classification", "transformers", "generated_from_trainer", "license:mit", "model-index" ]
text-classification
false
okite97
null
okite97/roberta-base-news3
6
null
transformers
15,897
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: roberta-base-news3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-news3 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3468 - Accuracy: 0.8986 - F1: 0.9002 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.3102 | 1.0 | 61 | 0.3032 | 0.8971 | 0.8977 | | 0.1949 | 2.0 | 122 | 0.3036 | 0.8986 | 0.8976 | | 0.1322 | 3.0 | 183 | 0.3106 | 0.9029 | 0.9024 | | 0.0988 | 4.0 | 244 | 0.3468 | 0.8986 | 0.9002 | ### Framework versions - Transformers 4.21.0 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
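A short sketch of scoring headlines with this news classifier; the label set is not listed in the card, so the returned label names come from the uploaded config, and the example headlines are illustrative.

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="okite97/roberta-base-news3")

headlines = [
    "Central bank raises interest rates amid inflation concerns",
    "Champions League final ends in dramatic penalty shootout",
]
# The pipeline accepts a batch (list) of texts and returns one prediction per item
for headline, prediction in zip(headlines, classifier(headlines)):
    print(headline, "->", prediction)
```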
yanaiela/roberta-base-epoch_83
dd858e6b17f0729249a30394d24341fa7b93ec5e
2022-07-29T23:10:09.000Z
[ "pytorch", "roberta", "fill-mask", "en", "dataset:wikipedia", "dataset:bookcorpus", "arxiv:1907.11692", "arxiv:2207.14251", "transformers", "roberta-base", "roberta-base-epoch_83", "license:mit", "autotrain_compatible" ]
fill-mask
false
yanaiela
null
yanaiela/roberta-base-epoch_83
6
null
transformers
15,898
--- language: en tags: - roberta-base - roberta-base-epoch_83 license: mit datasets: - wikipedia - bookcorpus --- # RoBERTa, Intermediate Checkpoint - Epoch 83 This model is part of our reimplementation of the [RoBERTa model](https://arxiv.org/abs/1907.11692), trained on Wikipedia and the Book Corpus only. We train this model for almost 100K steps, corresponding to 83 epochs. We provide the 84 checkpoints (including the randomly initialized weights before training) to make it possible to study the training dynamics of such models, among other possible use cases. These models were trained as part of a work that studies how simple data statistics, such as co-occurrences, affect model predictions, as described in the paper [Measuring Causal Effects of Data Statistics on Language Model's `Factual' Predictions](https://arxiv.org/abs/2207.14251). This is RoBERTa-base epoch_83. ## Model Description This model was captured during a reproduction of [RoBERTa-base](https://huggingface.co/roberta-base), for English: it is a Transformers model pretrained on a large corpus of English data using the Masked Language Modelling (MLM) objective. The intended uses, limitations, training data and training procedure for the fully trained model are similar to [RoBERTa-base](https://huggingface.co/roberta-base). Two major differences from the original model: * We trained our model for 100K steps instead of 500K * We use only Wikipedia and the Book Corpus, as these corpora are publicly available. ### How to use Using code from [RoBERTa-base](https://huggingface.co/roberta-base), here is an example based on PyTorch: ``` from transformers import pipeline model = pipeline("fill-mask", model='yanaiela/roberta-base-epoch_83', device=-1, top_k=10) model("Hello, I'm the <mask> RoBERTa-base language model") ``` ## Citation info ```bibtex @article{2207.14251, Author = {Yanai Elazar and Nora Kassner and Shauli Ravfogel and Amir Feder and Abhilasha Ravichander and Marius Mosbach and Yonatan Belinkov and Hinrich Schütze and Yoav Goldberg}, Title = {Measuring Causal Effects of Data Statistics on Language Model's `Factual' Predictions}, Year = {2022}, Eprint = {arXiv:2207.14251}, } ```
platzi/platzi-vit-base-beans-omar-espejel
bda4572bdc7ff2e83bf63afa5b992940f37aab44
2022-07-28T18:59:01.000Z
[ "pytorch", "tensorboard", "vit", "image-classification", "dataset:beans", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
image-classification
false
platzi
null
platzi/platzi-vit-base-beans-omar-espejel
6
null
transformers
15,899
--- license: apache-2.0 tags: - image-classification - generated_from_trainer datasets: - beans metrics: - accuracy widget: - src: https://huggingface.co/platzi/platzi-vit-base-beans/resolve/main/healthy.jpeg example_title: Healthy - src: https://huggingface.co/platzi/platzi-vit-base-beans/resolve/main/bean_rust.jpeg example_title: Bean Rust model-index: - name: platzi-vit-base-beans results: - task: name: Image Classification type: image-classification dataset: name: beans type: beans config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9924812030075187 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-vit-base-beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0336 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1381 | 3.85 | 500 | 0.0336 | 0.9925 | ### Framework versions - Transformers 4.21.0 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
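A minimal sketch of classifying one of the card's widget images with the image-classification pipeline; the image URL is copied from the widget entries above.

```python
from transformers import pipeline

# ViT classifier fine-tuned on the beans dataset; repo id from the record above
classifier = pipeline("image-classification", model="platzi/platzi-vit-base-beans-omar-espejel")

# Widget example URL from the card
url = "https://huggingface.co/platzi/platzi-vit-base-beans/resolve/main/healthy.jpeg"
for pred in classifier(url):
    print(pred["label"], round(pred["score"], 4))
```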