| Column | Dtype | Range / classes |
|---|---|---|
| modelId | string | lengths 4–112 |
| sha | string | lengths 40–40 |
| lastModified | string | lengths 24–24 |
| tags | sequence | n/a |
| pipeline_tag | string | 29 classes |
| private | bool | 1 class |
| author | string | lengths 2–38 |
| config | null | n/a |
| id | string | lengths 4–112 |
| downloads | float64 | 0–36.8M |
| likes | float64 | 0–712 |
| library_name | string | 17 classes |
| __index_level_0__ | int64 | 0–38.5k |
| readme | string | lengths 0–186k |
cestwc/bart-base-l2s
e7ef9d2a7f9f42573f35c570c7594c4483dd02e5
2022-06-12T16:10:11.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
cestwc
null
cestwc/bart-base-l2s
3
null
transformers
22,300
Entry not found
cestwc/roberta-base-unigram-ternary-wikilingua
6f2299cb1cda54eb899af996770b182f2f46e8a5
2022-05-01T09:11:03.000Z
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
cestwc
null
cestwc/roberta-base-unigram-ternary-wikilingua
3
null
transformers
22,301
Entry not found
Muennighoff/p-1-512-fp32
d5930757a5257094efadac0d0f33bc47292e3fd8
2022-05-01T12:32:58.000Z
[ "pytorch", "t5", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
Muennighoff
null
Muennighoff/p-1-512-fp32
3
null
transformers
22,302
--- license: apache-2.0 tags: - generated_from_trainer datasets: - xsum metrics: - rouge model-index: - name: p-1-512-fp32 results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: xsum type: xsum args: default metrics: - name: Rouge1 type: rouge value: 0.3108 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # p-1-512-fp32 This model is a fine-tuned version of [Muennighoff/t5-small-finetuned-xsum-512](https://huggingface.co/Muennighoff/t5-small-finetuned-xsum-512) on the xsum dataset. It achieves the following results on the evaluation set: - Loss: 16.6408 - Rouge1: 0.3108 - Rouge2: 0.0 - Rougel: 0.3091 - Rougelsum: 0.3102 - Gen Len: 18.8095 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 17.3139 | 1.0 | 7854 | 16.6408 | 0.3108 | 0.0 | 0.3091 | 0.3102 | 18.8095 | ### Framework versions - Transformers 4.19.0.dev0 - Pytorch 1.10.2 - Datasets 2.1.0 - Tokenizers 0.12.1
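The card above describes a seq2seq summarization fine-tune but gives no usage snippet; a minimal sketch with the standard `transformers` seq2seq API (the "summarize:" prefix and the sample article are illustrative assumptions, not documented by the card) might look like:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "Muennighoff/p-1-512-fp32"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# T5-style checkpoints are usually prompted with a task prefix; this is an assumption.
article = "summarize: The government announced a new package of measures on Monday ..."
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=512)
summary_ids = model.generate(**inputs, num_beams=4, max_length=60)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```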
cuzeverynameistaken/wav2vec2-base-timit-demo-colab1
107649d75fe283600856f7a9e294ad7d185bfd0e
2022-05-01T19:55:38.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
cuzeverynameistaken
null
cuzeverynameistaken/wav2vec2-base-timit-demo-colab1
3
null
transformers
22,303
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-base-timit-demo-colab1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-timit-demo-colab1 This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7170 - Wer: 0.4784 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 60 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 5.1915 | 13.89 | 500 | 3.1318 | 1.0 | | 1.4993 | 27.78 | 1000 | 0.6736 | 0.5485 | | 0.3416 | 41.67 | 1500 | 0.7111 | 0.5092 | | 0.1937 | 55.56 | 2000 | 0.7170 | 0.4784 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
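The ASR card above lists training details but no inference example; a minimal sketch using `Wav2Vec2ForCTC`, assuming the repository ships the matching processor files (the silent placeholder waveform stands in for real 16 kHz audio):

```python
import numpy as np
import torch
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

model_id = "cuzeverynameistaken/wav2vec2-base-timit-demo-colab1"
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

# Placeholder input: one second of silence at 16 kHz; replace with a real waveform.
speech = np.zeros(16000, dtype=np.float32)
inputs = processor(speech, sampling_rate=16000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values).logits
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))
```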
hf-internal-testing/wav2vec2-conformer-frame-class
b2978bb62cbc583ef7c6ca704712d22726c44c23
2022-05-01T16:03:38.000Z
[ "pytorch", "wav2vec2-conformer", "audio-frame-classification", "transformers" ]
null
false
hf-internal-testing
null
hf-internal-testing/wav2vec2-conformer-frame-class
3
null
transformers
22,304
Entry not found
PSW/mixed_sim3_seed42
9184591542fc490d3f4044adf30b68659a75bad1
2022-05-02T03:37:02.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/mixed_sim3_seed42
3
null
transformers
22,305
Entry not found
Martin97Bozic/bert-base-multilingual-uncased-finetuned-squad
6f793af4e417e3b1c4c69dc9647391df7b020137
2022-05-04T14:13:34.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Martin97Bozic
null
Martin97Bozic/bert-base-multilingual-uncased-finetuned-squad
3
null
transformers
22,306
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-multilingual-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-multilingual-uncased-finetuned-squad This model is a fine-tuned version of [bert-base-multilingual-uncased](https://huggingface.co/bert-base-multilingual-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.0109 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.0252 | 1.0 | 3163 | 0.9733 | | 0.7401 | 2.0 | 6326 | 0.9607 | | 0.516 | 3.0 | 9489 | 1.0109 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
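For the SQuAD question-answering fine-tune above, a minimal usage sketch with the `question-answering` pipeline (the question and context are made-up examples):

```python
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="Martin97Bozic/bert-base-multilingual-uncased-finetuned-squad",
)
result = qa(
    question="Where is the Eiffel Tower located?",
    context="The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France.",
)
print(result["answer"], round(result["score"], 3))
```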
waboucay/camembert-base-finetuned-xnli_fr-finetuned-nli-rua_wl
05756090a2c201536fba0e794abdd99630759a0d
2022-05-02T14:00:24.000Z
[ "pytorch", "camembert", "text-classification", "fr", "transformers", "nli" ]
text-classification
false
waboucay
null
waboucay/camembert-base-finetuned-xnli_fr-finetuned-nli-rua_wl
3
null
transformers
22,307
--- language: - fr tags: - nli metrics: - f1 --- ## Eval results We obtain the following results on ```validation``` and ```test``` sets: | Set | F1<sub>micro</sub> | F1<sub>macro</sub> | |------------|--------------------|--------------------| | validation | 69.9 | 69.9 | | test | 68.8 | 68.8 |
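The card above only reports F1 scores; since the checkpoint is tagged `text-classification`/`nli`, a minimal sentence-pair sketch could look like the following (the French premise/hypothesis pair is invented, and the label names depend on the checkpoint's config, which the card does not document):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "waboucay/camembert-base-finetuned-xnli_fr-finetuned-nli-rua_wl"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

premise = "Le projet de loi a été adopté par l'Assemblée nationale."
hypothesis = "Le texte a été rejeté."
inputs = tokenizer(premise, hypothesis, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(model.config.id2label.get(pred, pred))  # label set comes from the checkpoint config
```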
niklaspm/linkbert-large-finetuned-squad
ec33b5109830a7e32076a3af678e318a5e9ca574
2022-05-03T07:51:30.000Z
[ "pytorch", "bert", "question-answering", "arxiv:2203.15827", "transformers", "license:apache-2.0", "autotrain_compatible" ]
question-answering
false
niklaspm
null
niklaspm/linkbert-large-finetuned-squad
3
null
transformers
22,308
--- license: apache-2.0 --- **Exact Match** 86.5 **F1** 92.68 Check out [linkbert-base-finetuned-squad](https://huggingface.co/niklaspm/linkbert-base-finetuned-squad) See [LinkBERT Paper](https://arxiv.org/abs/2203.15827)
armanc/affiliations-roberta-base-0.0.1
9d66b84fd6cd201f2b67bcb38d1049b567dbff76
2022-05-02T20:31:34.000Z
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
false
armanc
null
armanc/affiliations-roberta-base-0.0.1
3
null
transformers
22,309
Entry not found
veronica320/ADEPT_bert
c014549bc100bd6addfbe22e964c034cff33a438
2022-05-03T02:23:38.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
veronica320
null
veronica320/ADEPT_bert
3
null
transformers
22,310
Entry not found
veronica320/ADEPT_roberta
8fd7efec24b425fb788a84d822824382acf2679a
2022-05-03T02:25:24.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
veronica320
null
veronica320/ADEPT_roberta
3
null
transformers
22,311
Entry not found
PSW/min_senttrm_del_seed1
722ad22042a7b3503fda95d52fd1f7b9bec5694a
2022-05-03T13:52:08.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/min_senttrm_del_seed1
3
null
transformers
22,312
Entry not found
IsekaiMeta/dapprf3
28a9bf9df01c6070b0b852ef8bfb90c028458ffb
2022-05-03T17:55:50.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
IsekaiMeta
null
IsekaiMeta/dapprf3
3
null
transformers
22,313
--- tags: - conversational --- # dapprf3
laituan245/molt5-base-smiles2caption
7b7d4b0ab8b66b351e669b1f66272418ba15c3d9
2022-05-03T18:07:57.000Z
[ "pytorch", "t5", "text2text-generation", "arxiv:2204.11817", "transformers", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
laituan245
null
laituan245/molt5-base-smiles2caption
3
null
transformers
22,314
--- license: apache-2.0 --- This model can be used to generate an input caption from a SMILES string. ## Example Usage ```python from transformers import T5Tokenizer, T5ForConditionalGeneration tokenizer = T5Tokenizer.from_pretrained("laituan245/molt5-base-smiles2caption", model_max_length=512) model = T5ForConditionalGeneration.from_pretrained('laituan245/molt5-base-smiles2caption') input_text = 'C1=CC2=C(C(=C1)[O-])NC(=CC2=O)C(=O)O' input_ids = tokenizer(input_text, return_tensors="pt").input_ids outputs = model.generate(input_ids, num_beams=5, max_length=512) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` ## Paper For more information, please take a look at our paper. Paper: [Translation between Molecules and Natural Language](https://arxiv.org/abs/2204.11817) Authors: *Carl Edwards\*, Tuan Lai\*, Kevin Ros, Garrett Honke, Heng Ji*
r4ndomw4lk/distilbert-base-uncased-finetuned-emotion
aa2cb3084007bdb699253e45cd90fa6f43550e2d
2022-05-03T18:28:02.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
r4ndomw4lk
null
r4ndomw4lk/distilbert-base-uncased-finetuned-emotion
3
null
transformers
22,315
Entry not found
chrisvinsen/xlsr-wav2vec2-base-commonvoice-demo-colab-5
9a830f2d577f827cf8435cf459835a7a0a06d66a
2022-05-05T03:52:35.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
chrisvinsen
null
chrisvinsen/xlsr-wav2vec2-base-commonvoice-demo-colab-5
3
null
transformers
22,316
Entry not found
DioLiu/distilroberta-base-less-Taylor
a9ace371d88e0febdc81647dce54cfec434a2d36
2022-05-04T06:24:21.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
DioLiu
null
DioLiu/distilroberta-base-less-Taylor
3
null
transformers
22,317
Entry not found
waboucay/camembert-base-finetuned-nli-repnum_wl
04368878c13ac47105e493c39e2eecc447a9c259
2022-05-04T09:27:26.000Z
[ "pytorch", "camembert", "text-classification", "fr", "transformers", "nli" ]
text-classification
false
waboucay
null
waboucay/camembert-base-finetuned-nli-repnum_wl
3
null
transformers
22,318
--- language: - fr tags: - nli metrics: - f1 --- ## Eval results We obtain the following results on ```validation``` and ```test``` sets: | Set | F1<sub>micro</sub> | F1<sub>macro</sub> | |------------|--------------------|--------------------| | validation | 74.6 | 74.5 | | test | 77.8 | 77.8 |
waboucay/camembert-base-finetuned-xnli_fr-finetuned-nli-repnum_wl
71e73c5a65f63da08104c8ae9163a5ae69c63497
2022-05-04T09:31:42.000Z
[ "pytorch", "camembert", "text-classification", "fr", "transformers", "nli" ]
text-classification
false
waboucay
null
waboucay/camembert-base-finetuned-xnli_fr-finetuned-nli-repnum_wl
3
null
transformers
22,319
--- language: - fr tags: - nli metrics: - f1 --- ## Eval results We obtain the following results on ```validation``` and ```test``` sets: | Set | F1<sub>micro</sub> | F1<sub>macro</sub> | |------------|--------------------|--------------------| | validation | 73.3 | 73.3 | | test | 69.4 | 69.4 |
jenspt/roberta
38ab3133fa4b720a0e285bd4be76c2eb572b6ce7
2022-05-04T13:30:54.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
jenspt
null
jenspt/roberta
3
null
transformers
22,320
Entry not found
vblagoje/greaselm-obqa
b92d40155f3783e9dc952b86cf3f77614ef64c94
2022-05-28T14:02:15.000Z
[ "pytorch", "greaselm", "transformers" ]
null
false
vblagoje
null
vblagoje/greaselm-obqa
3
null
transformers
22,321
Entry not found
BigSalmon/MediumInformalToFormalLincoln4
31e57f9f27d34c6ec73b383e5f1cb2ce9db15198
2022-05-04T21:12:03.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
BigSalmon
null
BigSalmon/MediumInformalToFormalLincoln4
3
null
transformers
22,322
``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/MediumInformalToFormalLincoln4") model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln4") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (Warriors vs. Rockets in Game 7): text: eagerly anticipated by fans, game 7's are the highlight of the post-season. text: ever-building in suspense, game 7's have the crowd captivated. *** Essay Intro (South Korean TV Is Becoming Popular): text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ). text: increasingly held in critical esteem, south korean television continues to impress. text: at the forefront of quality content, south korea is quickly achieving celebrity status. *** Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? 
https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. text: failing to draw in the masses, the nba has ( fallen into / succumb to / bowed to ) disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap ( solutions / interventions / enhancements ) could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ``` ``` wordy: classical music is becoming less popular more and more. Translate into Concise Text: interest in classic music is fading. *** wordy: ``` ``` sweet: savvy voters ousted him. longer: voters who were informed delivered his defeat. *** sweet: ``` ``` 1: commercial space company spacex plans to launch a whopping 52 flights in 2022. 2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022. 3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights. 
4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company. 5: a commercial space company, spacex aims to conduct 52 flights in 2022. *** 1: ``` Keywords to sentences or sentence. ``` ngos are characterized by: □ voluntary citizens' group that is organized on a local, national or international level □ encourage political participation □ often serve humanitarian functions □ work for social, economic, or environmental change *** what are the drawbacks of living near an airbnb? □ noise □ parking □ traffic □ security □ strangers *** ``` ``` original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung. adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung. *** original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark. adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark. *** original: ```
PSW/low_resource_percent1_maxsimins_seed1
47c324b8bc29509b8cbfe0718a079abc9817b24e
2022-05-05T06:19:01.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent1_maxsimins_seed1
3
null
transformers
22,323
Entry not found
PSW/low_resource_percent1_maxsimins_seed27
5b0452c29e8d86252cbc4fa85c18ceb1c223832c
2022-05-05T06:30:06.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent1_maxsimins_seed27
3
null
transformers
22,324
Entry not found
PSW/low_resource_percent1_minsimdel_seed1
011c9bda560667176ea79ff5a767ce1d2765659e
2022-05-05T07:24:48.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent1_minsimdel_seed1
3
null
transformers
22,325
Entry not found
PSW/low_resource_percent1_minsimdel_seed27
b330170e8ed039ab1619d63206042d4a240b4a0e
2022-05-05T07:35:28.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent1_minsimdel_seed27
3
null
transformers
22,326
Entry not found
CarlCochet/trajectory-transformer-ant-medium-v2
d05ef7360952a716d8d6852fd8ebe687c09fc1a3
2022-05-12T16:57:57.000Z
[ "pytorch", "trajectory_transformer", "feature-extraction", "transformers", "license:mit" ]
feature-extraction
false
CarlCochet
null
CarlCochet/trajectory-transformer-ant-medium-v2
3
null
transformers
22,327
--- license: mit ---
CarlCochet/trajectory-transformer-halfcheetah-medium-expert-v2
93b1ac9bcca494f9c07c965548a57b0cbdf9bd4b
2022-05-12T17:01:20.000Z
[ "pytorch", "trajectory_transformer", "feature-extraction", "transformers", "license:mit" ]
feature-extraction
false
CarlCochet
null
CarlCochet/trajectory-transformer-halfcheetah-medium-expert-v2
3
null
transformers
22,328
--- license: mit ---
CarlCochet/trajectory-transformer-hopper-medium-replay-v2
3957253bdb8022c2b6496250b5a007332a2c1c81
2022-05-12T17:04:54.000Z
[ "pytorch", "trajectory_transformer", "feature-extraction", "transformers", "license:mit" ]
feature-extraction
false
CarlCochet
null
CarlCochet/trajectory-transformer-hopper-medium-replay-v2
3
null
transformers
22,329
--- license: mit ---
CarlCochet/trajectory-transformer-walker2d-medium-replay-v2
2357417b0beede5d88d9f8604faf307a10f820ef
2022-05-12T17:07:29.000Z
[ "pytorch", "trajectory_transformer", "feature-extraction", "transformers", "license:mit" ]
feature-extraction
false
CarlCochet
null
CarlCochet/trajectory-transformer-walker2d-medium-replay-v2
3
null
transformers
22,330
--- license: mit ---
DioLiu/distilroberta-base-OnlyShakeMask
92f670fdadc94dfe6e65aaae70d147b5fd15c38a
2022-05-05T11:29:27.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
DioLiu
null
DioLiu/distilroberta-base-OnlyShakeMask
3
null
transformers
22,331
Entry not found
PSW/low_resource_percent10_minmaxswap_seed42
58b55714ce0de848627d9f62e49ee6efac23b587
2022-05-05T11:14:48.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent10_minmaxswap_seed42
3
null
transformers
22,332
Entry not found
ghabin/dystopian_romans
40279ac918f7f877180cfb745d3a47ffc4ea7f4d
2022-05-05T11:30:53.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "license:afl-3.0" ]
text-generation
false
ghabin
null
ghabin/dystopian_romans
3
null
transformers
22,333
--- license: afl-3.0 ---
PSW/low_resource_percent10_minsimdel_seed27
5b1dcd10f03b4d1454798c319a2a790439d9464f
2022-05-05T11:44:37.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent10_minsimdel_seed27
3
null
transformers
22,334
Entry not found
catofnull/BERT-fold1
1c9d397cac144cc77ec30d0a7f5258dee82d884a
2022-05-05T11:46:22.000Z
[ "pytorch", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
catofnull
null
catofnull/BERT-fold1
3
null
transformers
22,335
Entry not found
SophieTr/PP0_rm_v1_gpu
7e87ed07a67080cd1f0a427a1d8f27c716b6c585
2022-05-05T12:28:09.000Z
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
SophieTr
null
SophieTr/PP0_rm_v1_gpu
3
null
transformers
22,336
Entry not found
AlekseyKorshuk/opt-350m
cd1d4cf5293286eb467434036c6aeba040c740ac
2022-06-25T16:47:22.000Z
[ "pytorch", "opt", "text-generation", "transformers", "license:apache-2.0" ]
text-generation
false
AlekseyKorshuk
null
AlekseyKorshuk/opt-350m
3
1
transformers
22,337
--- license: apache-2.0 ---
dyyyyyyyy/xTune_squad_XLM-RoBERTa-base
05eecb3bf3a6f7c69ce5b52a28851a0270bc0264
2022-05-05T14:08:27.000Z
[ "pytorch", "xlm-roberta", "transformers" ]
null
false
dyyyyyyyy
null
dyyyyyyyy/xTune_squad_XLM-RoBERTa-base
3
null
transformers
22,338
Entry not found
tau/False_large_pmi_para0_sent1_span2_itFalse_sargmax_rrFalse_8_1024_0.15_1
c5496aed39d255628b3d2a2e2ce860a9d3962260
2022-05-05T14:01:41.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
tau
null
tau/False_large_pmi_para0_sent1_span2_itFalse_sargmax_rrFalse_8_1024_0.15_1
3
null
transformers
22,339
Entry not found
AlekseyKorshuk/opt-125m
5841a04877d7765fb33c4376d3217158e90a0dca
2022-05-05T17:42:11.000Z
[ "pytorch", "opt", "text-generation", "transformers", "license:apache-2.0" ]
text-generation
false
AlekseyKorshuk
null
AlekseyKorshuk/opt-125m
3
null
transformers
22,340
--- license: apache-2.0 ---
nguyenmanhbao/finetuning-sentiment-model-3000-samples
f64c86098f1b069447b44e0f8065867e7ea1fd59
2022-05-05T19:18:18.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers" ]
text-classification
false
nguyenmanhbao
null
nguyenmanhbao/finetuning-sentiment-model-3000-samples
3
null
transformers
22,341
Entry not found
ekimz/t5_ttmodel
ac27f40a3622d50e153e0c80d01fdccf6cb68078
2022-05-05T19:55:05.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
ekimz
null
ekimz/t5_ttmodel
3
null
transformers
22,342
Entry not found
PSW/low_resource_percent20_seed1
30c919fcaeaefb1490a7072deaa3c984951bf2fb
2022-05-05T20:03:33.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent20_seed1
3
null
transformers
22,343
Entry not found
huggingtweets/theovalpawffice
3e324906eadb0cc7abd571bc62bd5b69f05c4a21
2022-05-05T20:41:08.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/theovalpawffice
3
null
transformers
22,344
--- language: en thumbnail: http://www.huggingtweets.com/theovalpawffice/1651782387551/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1346560834613469184/LJVlGDRS_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">The Oval Pawffice® 🇺🇸 DOTUS Fans</div> <div style="text-align: center; font-size: 14px;">@theovalpawffice</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from The Oval Pawffice® 🇺🇸 DOTUS Fans. | Data | The Oval Pawffice® 🇺🇸 DOTUS Fans | | --- | --- | | Tweets downloaded | 3250 | | Retweets | 68 | | Short tweets | 106 | | Tweets kept | 3076 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/uraeqzqr/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @theovalpawffice's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3jrqovr7) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3jrqovr7/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/theovalpawffice') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
armanc/affiliations-roberta-base-step18K-loss-0.099
2e6afb8881cb7e26fd112b5c2c2dd01f90a2931f
2022-05-06T02:53:52.000Z
[ "pytorch", "transformers" ]
null
false
armanc
null
armanc/affiliations-roberta-base-step18K-loss-0.099
3
null
transformers
22,345
Entry not found
xingqiang/nezha-zh-address-match-finetuned
b1ed84e39535ecbf4eb59a589fc18afe8d1f9967
2022-06-03T07:47:03.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
xingqiang
null
xingqiang/nezha-zh-address-match-finetuned
3
null
transformers
22,346
### Chinese address matching task
shoubhik/electra_abbv_20k_data_multilabel_auc_0.89
21a5dc5758c332f59cfbfb7b731d88604b21f2fb
2022-05-06T05:40:59.000Z
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
false
shoubhik
null
shoubhik/electra_abbv_20k_data_multilabel_auc_0.89
3
null
transformers
22,347
Entry not found
catofnull/Pretrain3-fold1
0b0c2dfe02fa6cfd76ed0c8733419cb134211f4a
2022-05-11T07:04:32.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
catofnull
null
catofnull/Pretrain3-fold1
3
null
transformers
22,348
Entry not found
PrajwalS/wav2vec2_custom_model_50
b43addea9e403fd96a74bfbcaab3ade9670ae798
2022-05-08T16:33:22.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
PrajwalS
null
PrajwalS/wav2vec2_custom_model_50
3
null
transformers
22,349
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2_custom_model_50 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2_custom_model_50 This model is a fine-tuned version of [facebook/wav2vec2-large-960h-lv60-self](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu102 - Datasets 1.18.4 - Tokenizers 0.11.6
crabz/exp4
1ee8b52b44562c7e31d6d200f158163807b67154
2022-05-06T10:02:47.000Z
[ "pytorch", "roberta", "transformers" ]
null
false
crabz
null
crabz/exp4
3
null
transformers
22,350
Entry not found
lucifermorninstar011/autotrain-lucifer_multi_auto-831626529
936872923add3566410905f1daff8a889f618b84
2022-05-07T05:22:24.000Z
[ "pytorch", "bert", "text-classification", "en", "dataset:lucifermorninstar011/autotrain-data-lucifer_multi_auto", "transformers", "autotrain", "co2_eq_emissions" ]
text-classification
false
lucifermorninstar011
null
lucifermorninstar011/autotrain-lucifer_multi_auto-831626529
3
null
transformers
22,351
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - lucifermorninstar011/autotrain-data-lucifer_multi_auto co2_eq_emissions: 1418.583772776962 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 831626529 - CO2 Emissions (in grams): 1418.583772776962 ## Validation Metrics - Loss: 0.019245266914367676 - Accuracy: 0.9971231760498559 - Macro F1: 0.9917225353498834 - Micro F1: 0.9971231760498559 - Weighted F1: 0.9971219017846226 - Macro Precision: 0.9903556981858435 - Micro Precision: 0.9971231760498559 - Weighted Precision: 0.9971268798191825 - Macro Recall: 0.9931423442532272 - Micro Recall: 0.9971231760498559 - Weighted Recall: 0.9971231760498559 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/lucifermorninstar011/autotrain-lucifer_multi_auto-831626529 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("lucifermorninstar011/autotrain-lucifer_multi_auto-831626529", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("lucifermorninstar011/autotrain-lucifer_multi_auto-831626529", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
huggingtweets/finnegansreader
2dc88f45cea10537930d2798a1aa6efa6ea276d2
2022-05-06T19:04:03.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/finnegansreader
3
null
transformers
22,352
--- language: en thumbnail: http://www.huggingtweets.com/finnegansreader/1651863836821/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/378800000425274798/e6f9ae4914b86c7be5bd1e68d451b2cd_400x400.jpeg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Finnegans Wake</div> <div style="text-align: center; font-size: 14px;">@finnegansreader</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Finnegans Wake. | Data | Finnegans Wake | | --- | --- | | Tweets downloaded | 3250 | | Retweets | 0 | | Short tweets | 0 | | Tweets kept | 3250 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/26stpp9q/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @finnegansreader's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2s557xc1) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2s557xc1/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/finnegansreader') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
allenai/tk-instruct-3b-pos
33868e0032073f2a6183a87a19d0cc1a7bc1bee8
2022-05-27T06:30:40.000Z
[ "pytorch", "t5", "text2text-generation", "en", "dataset:natural instructions v2.0", "arxiv:1910.10683", "arxiv:2204.07705", "transformers", "license:apache-2.0", "autotrain_compatible" ]
text2text-generation
false
allenai
null
allenai/tk-instruct-3b-pos
3
null
transformers
22,353
--- language: en license: apache-2.0 datasets: - natural instructions v2.0 --- # Model description Tk-Instruct is a series of encoder-decoder Transformer models that are trained to solve various NLP tasks by following in-context instructions (plain language task definitions, k-shot examples, explanations, etc). Built upon the pre-trained [T5 models](https://arxiv.org/abs/1910.10683), they are fine-tuned on a large number of tasks & instructions that are collected in the [Natural Instructions benchmark](https://github.com/allenai/natural-instructions), which contains 1600+ tasks in 70+ broach categories in total. This enables the model to not only process the training tasks, but also generalize to many unseen tasks without further parameter update. More resources for using the model: - **Paper**: [link](https://arxiv.org/abs/2204.07705) - **Code repository**: [Tk-Instruct](https://github.com/yizhongw/Tk-Instruct) - **Official Website**: [Natural Instructions](https://instructions.apps.allenai.org/) - **All released models**: [allenai/tk-instruct](https://huggingface.co/models?search=allenai/tk-instruct) ## Intended uses & limitations Tk-Instruct can be used to do many NLP tasks by following instructions. ### How to use When instructing the model, task definition or demonstration examples or explanations should be prepended to the original input and fed into the model. You can easily try Tk-Instruct models as follows: ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("allenai/tk-instruct-3b-def") >>> model = AutoModelForSeq2SeqLM.from_pretrained("allenai/tk-instruct-3b-def") >>> input_ids = tokenizer.encode( "Definition: return the currency of the given country. Now complete the following example - Input: India. Output:", return_tensors="pt") >>> output = model.generate(input_ids, max_length=10) >>> output = tokenizer.decode(output[0], skip_special_tokens=True) # model should output 'Indian Rupee' >>> input_ids = tokenizer.encode( "Definition: negate the following sentence. Input: John went to school. Output:", return_tensors="pt") >>> output = model.generate(input_ids, max_length=10) >>> output = tokenizer.decode(output[0], skip_special_tokens=True) # model should output 'John did not go to shool.' ``` ### Limitations We are still working on understanding the behaviors of these models, but here are several issues we have found: - Models are generally sensitive to the instruction. Sometimes rewording the instruction can lead to very different output. - Models are not always compliant to the instruction. Sometimes the model don't follow your instruction (e.g., when you ask the model to generate one sentence, it might still generate one word or a long story). - Models might totally fail on some tasks. If you find serious issues or any interesting result, you are welcome to share with us! ## Training data Tk-Instruct is trained using the tasks & instructions in [Natural Instructions benchmark](https://github.com/allenai/natural-instructions), which contains 1600+ tasks in 70+ broach categories in total. We follow the official train/test split. Tk-Instruct model series were trained using 757 tasks, and mTk-Instruct series were trained using 1271 tasks (including some non-English tasks). The training tasks are in 64 broad categories, such as text categorization / question answering / sentiment analysis / summarization / grammar error detection / dialogue generation / etc. The other 12 categories are selected for evaluation. 
## Training procedure All our models are initialized from either T5 models or mT5 models. Because generating the output can be regarded as a form of language modeling, we used their [LM adapted version](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#lm-adapted-t511lm100k). All data is converted into a text-to-text format, and models are fine-tuned to maximize the likelihood of the output sequence. Our [released models](https://huggingface.co/models?search=allenai/tk-instruct) are in different sizes, and each of them was trained with a specific type of instruction encoding. For instance, `tk-instruct-3b-def-pos` was initialized from [t5-xl-lm-adapt](https://huggingface.co/google/t5-xl-lm-adapt), and it saw task definition & 2 positive examples as the instruction during training time. Although they are trained with only one type of instruction encodings, we found they can usually work with other type of encodings at test time (see more in our paper). ### BibTeX entry and citation info ```bibtex @article{wang2022benchmarking, title={Benchmarking Generalization via In-Context Instructions on 1,600+ Language Tasks}, author={Yizhong Wang and Swaroop Mishra and Pegah Alipoormolabashi and Yeganeh Kordi and Amirreza Mirzaei and A. Arunkumar and Arjun Ashok and Arut Selvan Dhanasekaran and Atharva Naik and David Stap and Eshaan Pathak and Giannis Karamanolakis and Haizhi Gary Lai and Ishan Purohit and Ishani Mondal and Jacob Anderson and Kirby Kuznia and Krima Doshi and Maitreya Patel and Kuntal Kumar Pal and M. Moradshahi and Mihir Parmar and Mirali Purohit and Neeraj Varshney and Phani Rohitha Kaza and Pulkit Verma and Ravsehaj Singh Puri and Rushang Karia and Shailaja Keyur Sampat and Savan Doshi and Siddharth Deepak Mishra and Sujan C. Reddy and Sumanta Patro and Tanay Dixit and Xu-dong Shen and Chitta Baral and Yejin Choi and Hannaneh Hajishirzi and Noah A. Smith and Daniel Khashabi}, year={2022}, archivePrefix={arXiv}, eprint={2204.07705}, primaryClass={cs.CL}, } ```
kneis/distilbert-sentiment-adversarial-training
f0b2dae7a63cd8cdd3a1a1ece676a92b79d20237
2022-05-06T21:54:42.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
kneis
null
kneis/distilbert-sentiment-adversarial-training
3
null
transformers
22,354
Entry not found
VoltaicDaniel/distilgpt2-finetuned-wikitext2
61b6c50cb45c41be24bb348d0caf4ee4cd2adc9e
2022-05-08T03:51:55.000Z
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-generation
false
VoltaicDaniel
null
VoltaicDaniel/distilgpt2-finetuned-wikitext2
3
null
transformers
22,355
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilgpt2-finetuned-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-wikitext2 This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.1909 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 18 | 4.2070 | | No log | 2.0 | 36 | 4.1958 | | No log | 3.0 | 54 | 4.1909 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
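The card above describes a distilgpt2 language-model fine-tune without a usage example; a minimal text-generation sketch (the prompt is arbitrary):

```python
from transformers import pipeline

generator = pipeline("text-generation", model="VoltaicDaniel/distilgpt2-finetuned-wikitext2")
output = generator("The history of the region begins", max_length=50, num_return_sequences=1)
print(output[0]["generated_text"])
```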
bko/bert-base-uncased-finetuned-swag
088f1f2b7226f71a7b4fffabb1bfeddc969588cc
2022-05-07T11:41:33.000Z
[ "pytorch", "tensorboard", "bert", "multiple-choice", "dataset:swag", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
multiple-choice
false
bko
null
bko/bert-base-uncased-finetuned-swag
3
null
transformers
22,356
--- license: apache-2.0 tags: - generated_from_trainer datasets: - swag metrics: - accuracy model-index: - name: bert-base-uncased-finetuned-swag results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-swag This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the swag dataset. It achieves the following results on the evaluation set: - Loss: 1.0099 - Accuracy: 0.7917 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.7577 | 1.0 | 4597 | 0.6133 | 0.7624 | | 0.3729 | 2.0 | 9194 | 0.6351 | 0.7841 | | 0.1405 | 3.0 | 13791 | 1.0099 | 0.7917 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
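The SWAG fine-tune above is a multiple-choice model, whose input layout (batch × num_choices × seq_len) differs from ordinary classification; a minimal sketch with `AutoModelForMultipleChoice` (the prompt and candidate endings are invented):

```python
import torch
from transformers import AutoTokenizer, AutoModelForMultipleChoice

model_id = "bko/bert-base-uncased-finetuned-swag"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMultipleChoice.from_pretrained(model_id)

prompt = "A man is sitting at a piano."
choices = ["He starts to play a song.", "He jumps into a swimming pool."]

# Encode each (prompt, choice) pair, then add a batch dimension so the
# tensors become (1, num_choices, seq_len) as the model expects.
encoding = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}

with torch.no_grad():
    logits = model(**inputs).logits
print(choices[logits.argmax(dim=-1).item()])
```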
Khalsuu/english-filipino-wav2vec2-l-xls-r-test-07
da15d6464af6a42cb2d7bac9dbff2f9a4003c496
2022-05-07T19:19:09.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:filipino_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Khalsuu
null
Khalsuu/english-filipino-wav2vec2-l-xls-r-test-07
3
null
transformers
22,357
--- license: apache-2.0 tags: - generated_from_trainer datasets: - filipino_voice model-index: - name: english-filipino-wav2vec2-l-xls-r-test-07 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # english-filipino-wav2vec2-l-xls-r-test-07 This model is a fine-tuned version of [jonatasgrosman/wav2vec2-large-xlsr-53-english](https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-english) on the filipino_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.6768 - Wer: 0.3755 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 2.9255 | 2.09 | 400 | 0.7742 | 0.7694 | | 0.5792 | 4.19 | 800 | 0.5368 | 0.5250 | | 0.3611 | 6.28 | 1200 | 0.4796 | 0.4718 | | 0.2742 | 8.38 | 1600 | 0.5308 | 0.4764 | | 0.201 | 10.47 | 2000 | 0.5885 | 0.4723 | | 0.164 | 12.57 | 2400 | 0.5595 | 0.4750 | | 0.1374 | 14.66 | 2800 | 0.5836 | 0.4366 | | 0.1138 | 16.75 | 3200 | 0.6110 | 0.4628 | | 0.0991 | 18.85 | 3600 | 0.6179 | 0.4174 | | 0.0837 | 20.94 | 4000 | 0.6681 | 0.4170 | | 0.0722 | 23.04 | 4400 | 0.6665 | 0.4103 | | 0.0576 | 25.13 | 4800 | 0.7538 | 0.4068 | | 0.052 | 27.23 | 5200 | 0.6808 | 0.3844 | | 0.0449 | 29.32 | 5600 | 0.6768 | 0.3755 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
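For the English/Filipino ASR fine-tune above, a minimal transcription sketch with the `automatic-speech-recognition` pipeline ("sample.wav" is a placeholder path to a 16 kHz recording, and decoding a file this way also requires ffmpeg to be installed):

```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="Khalsuu/english-filipino-wav2vec2-l-xls-r-test-07",
)
print(asr("sample.wav")["text"])
```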
scasutt/wav2vec2-large-xlsr-52_Swiss_German
b34b5ecf37d128ca97bf37e5516e9323cb6d20c1
2022-05-25T10:38:04.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
scasutt
null
scasutt/wav2vec2-large-xlsr-52_Swiss_German
3
null
transformers
22,358
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-large-xlsr-53_full_train results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xlsr-53_full_train This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the Swissdial dataset. It achieves the following results on the evaluation set: - Loss: 0.2811 - Wer: 0.2909 ## Model description Wav2Vec2-XLSR-53 trained on augmented Swiss Dial dataset ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 1.7666 | 2.69 | 1000 | 0.4356 | 0.4954 | | 0.7868 | 5.39 | 2000 | 0.2693 | 0.3180 | | 0.6948 | 8.09 | 3000 | 0.2811 | 0.2909 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu102 - Datasets 2.2.1 - Tokenizers 0.12.1
vuiseng9/nncf-qat-kd-bert-l-squadv1.1-sl256
c65b8ea22ab613209d6941bf2ef87139d2e8ef31
2022-05-07T17:15:19.000Z
[ "pytorch", "onnx", "bert", "dataset:squad", "transformers", "license:apache-2.0", "model-index" ]
null
false
vuiseng9
null
vuiseng9/nncf-qat-kd-bert-l-squadv1.1-sl256
3
null
transformers
22,359
--- license: apache-2.0 datasets: - squad model-index: - name: nncf-qat-kd-bert-l-squadv1.1-sl256 results: [] --- This model is quantized version of ```vuiseng9/bert-l-squadv1.1-sl256``` using OpenVINO NNCF. ### Training ```bash # used 4xV100 GPUS # --fp16 for lower turnaround and resource requirement python run_qa.py \ --model_name_or_path vuiseng9/bert-l-squadv1.1-sl256 \ --dataset_name squad \ --do_eval \ --do_train \ --evaluation_strategy steps \ --eval_steps 250 \ --learning_rate 3e-5 \ --fp16 \ --num_train_epochs 2 \ --per_device_eval_batch_size 64 \ --per_device_train_batch_size 8 \ --max_seq_length 256 \ --doc_stride 128 \ --save_steps 500 \ --logging_steps 1 \ --overwrite_output_dir \ --nncf_config nncf_bert_config_squad_kd.json \ #stock config which has seq.len modified to 256. --run_name $RUNID \ --output_dir $OUTDIR ``` ### Evaluation Require ```vuiseng9/transformers (fork)``` , commit: ```ff24569b```, NNCF v2.1+ commit (```8e26365```) ```bash git clone https://huggingface.co/vuiseng9/nncf-qat-kd-bert-l-squadv1.1-sl256 python run_qa.py \ --model_name_or_path ./nncf-qat-kd-bert-l-squadv1.1-sl256 \ --dataset_name squad \ --nncf_config ./nncf-qat-kd-bert-l-squadv1.1-sl256/nncf_bert_config_squad_kd.json \ --nncf_ckpt ./nncf-qat-kd-bert-l-squadv1.1-sl256 \ --do_eval \ --per_device_eval_batch_size 128 \ --max_seq_length 256 \ --doc_stride 128 \ --output_dir /tmp/eval-nncf-qat-kd-bert-l-squadv1.1-sl256 \ --overwrite_output_dir ``` ### Results ``` eval_exact_match = 87.1902 eval_f1 = 93.0286 eval_samples = 12097 ```
lucifermorninstar011/autotrain-lucifer_ner_multi-838326726
f648c2bb4a3d8e9d048bee5be108eed68f3b0913
2022-05-08T11:40:07.000Z
[ "pytorch", "distilbert", "token-classification", "en", "dataset:lucifermorninstar011/autotrain-data-lucifer_ner_multi", "transformers", "autotrain", "co2_eq_emissions", "autotrain_compatible" ]
token-classification
false
lucifermorninstar011
null
lucifermorninstar011/autotrain-lucifer_ner_multi-838326726
3
null
transformers
22,360
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - lucifermorninstar011/autotrain-data-lucifer_ner_multi co2_eq_emissions: 3.409136901758606 ---

# Model Trained Using AutoTrain

- Problem type: Entity Extraction
- Model ID: 838326726
- CO2 Emissions (in grams): 3.409136901758606

## Validation Metrics

- Loss: 0.003970975521951914
- Accuracy: 0.9991803230435035
- Precision: 0.9969928464523109
- Recall: 0.997096050476826
- F1: 0.9970444457939075

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/lucifermorninstar011/autotrain-lucifer_ner_multi-838326726
```

Or Python API:

```
from transformers import AutoModelForTokenClassification, AutoTokenizer

model = AutoModelForTokenClassification.from_pretrained("lucifermorninstar011/autotrain-lucifer_ner_multi-838326726", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("lucifermorninstar011/autotrain-lucifer_ner_multi-838326726", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
```
Jiexing/sparc_add_coref_and_depen_t5_3b-2304
29ceb5b160e8cf12bdff8add554301672c78eabc
2022-05-08T04:57:08.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Jiexing
null
Jiexing/sparc_add_coref_and_depen_t5_3b-2304
3
null
transformers
22,361
Entry not found
DioLiu/distilroberta-base-OnlyWikiMask
b3389921de0485bf4cc7c39871f39b8e8994d981
2022-05-08T08:23:50.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
DioLiu
null
DioLiu/distilroberta-base-OnlyWikiMask
3
null
transformers
22,362
Entry not found
anuragshas/wav2vec2-xls-r-300m-mr-cv9-with-lm
d97a00805336f02b81f689d53dc2ef0523875277
2022-05-17T22:48:20.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "mr", "dataset:mozilla-foundation/common_voice_9_0", "transformers", "mozilla-foundation/common_voice_9_0", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
anuragshas
null
anuragshas/wav2vec2-xls-r-300m-mr-cv9-with-lm
3
null
transformers
22,363
--- language: - mr license: apache-2.0 tags: - automatic-speech-recognition - mozilla-foundation/common_voice_9_0 - generated_from_trainer datasets: - mozilla-foundation/common_voice_9_0 metrics: - wer model-index: - name: XLS-R-300M - Marathi results: - task: type: automatic-speech-recognition name: Speech Recognition dataset: type: mozilla-foundation/common_voice_9_0 name: Common Voice 9 args: mr metrics: - type: wer value: 23.841 name: Test WER - name: Test CER type: cer value: 5.522 ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

#

This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_9_0 - MR dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3642
- Wer: 0.4190
- Cer: 0.0946

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 7.5e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 6124
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer | Cer |
|:-------------:|:------:|:----:|:---------------:|:------:|:------:|
| 3.5184 | 12.9 | 400 | 3.4210 | 1.0 | 1.0 |
| 2.3797 | 25.81 | 800 | 1.1068 | 0.8389 | 0.2584 |
| 1.5022 | 38.71 | 1200 | 0.5278 | 0.6280 | 0.1517 |
| 1.3181 | 51.61 | 1600 | 0.4254 | 0.5587 | 0.1297 |
| 1.2037 | 64.52 | 2000 | 0.3836 | 0.5143 | 0.1176 |
| 1.1245 | 77.42 | 2400 | 0.3643 | 0.4871 | 0.1111 |
| 1.0582 | 90.32 | 2800 | 0.3562 | 0.4676 | 0.1062 |
| 1.0027 | 103.23 | 3200 | 0.3530 | 0.4625 | 0.1058 |
| 0.9382 | 116.13 | 3600 | 0.3388 | 0.4442 | 0.1002 |
| 0.8915 | 129.03 | 4000 | 0.3430 | 0.4427 | 0.1000 |
| 0.853 | 141.94 | 4400 | 0.3536 | 0.4375 | 0.1000 |
| 0.8127 | 154.84 | 4800 | 0.3511 | 0.4344 | 0.0986 |
| 0.7861 | 167.74 | 5200 | 0.3595 | 0.4372 | 0.0993 |
| 0.7619 | 180.65 | 5600 | 0.3628 | 0.4316 | 0.0985 |
| 0.7537 | 193.55 | 6000 | 0.3633 | 0.4174 | 0.0943 |

### Framework versions

- Transformers 4.19.0.dev0
- Pytorch 1.11.0+cu102
- Datasets 2.1.1.dev0
- Tokenizers 0.12.1
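The card above documents training but no inference call. The sketch below is not part of the original card; it assumes the standard `transformers` pipeline API, a 16 kHz Marathi recording at a placeholder path, and (because the repository name ends in `-with-lm`) that `pyctcdecode`/`kenlm` are installed so the bundled language model can be used during decoding.

```python
# Illustrative usage sketch only; file name and decoding setup are assumptions.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="anuragshas/wav2vec2-xls-r-300m-mr-cv9-with-lm",
)

# "sample.wav" is a placeholder for any 16 kHz Marathi speech clip.
print(asr("sample.wav")["text"])
```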
lvwerra/gpt2-imdb-pos-v2
42792eafd6ad310c8cc41fddc52fd7e8f14ede4c
2022-05-08T20:09:07.000Z
[ "pytorch", "gpt2", "transformers" ]
null
false
lvwerra
null
lvwerra/gpt2-imdb-pos-v2
3
null
transformers
22,364
Entry not found
theojolliffe/bart-cnn-pubmed-arxiv-pubmed-v3-e12
0d9cf814802dc6b319322a0378423472c9dc8bae
2022-05-08T23:01:48.000Z
[ "pytorch", "tensorboard", "bart", "text2text-generation", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
theojolliffe
null
theojolliffe/bart-cnn-pubmed-arxiv-pubmed-v3-e12
3
null
transformers
22,365
--- license: mit tags: - generated_from_trainer metrics: - rouge model-index: - name: bart-cnn-pubmed-arxiv-pubmed-v3-e12 results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bart-cnn-pubmed-arxiv-pubmed-v3-e12

This model is a fine-tuned version of [theojolliffe/bart-cnn-pubmed-arxiv-pubmed](https://huggingface.co/theojolliffe/bart-cnn-pubmed-arxiv-pubmed) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8658
- Rouge1: 57.2678
- Rouge2: 43.347
- Rougel: 47.0854
- Rougelsum: 55.4167
- Gen Len: 142.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 12
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:|
| 1.2548 | 1.0 | 795 | 0.9154 | 53.4249 | 34.0377 | 36.4396 | 50.9884 | 141.8889 |
| 0.6994 | 2.0 | 1590 | 0.8213 | 54.7613 | 35.9428 | 38.3899 | 51.9527 | 142.0 |
| 0.5272 | 3.0 | 2385 | 0.7703 | 53.8561 | 35.4871 | 38.0502 | 51.131 | 141.8889 |
| 0.3407 | 4.0 | 3180 | 0.7764 | 53.9514 | 35.8553 | 39.1935 | 51.7005 | 142.0 |
| 0.2612 | 5.0 | 3975 | 0.7529 | 54.4056 | 36.2605 | 40.8003 | 52.0424 | 142.0 |
| 0.1702 | 6.0 | 4770 | 0.8105 | 54.2251 | 37.1441 | 41.2472 | 52.2803 | 142.0 |
| 0.1276 | 7.0 | 5565 | 0.8004 | 56.49 | 40.4009 | 44.018 | 54.2404 | 141.5556 |
| 0.0978 | 8.0 | 6360 | 0.7890 | 56.6339 | 40.9867 | 43.9603 | 54.4468 | 142.0 |
| 0.0711 | 9.0 | 7155 | 0.8285 | 56.0469 | 40.7758 | 44.1395 | 53.9668 | 142.0 |
| 0.0649 | 10.0 | 7950 | 0.8498 | 56.9873 | 42.4721 | 46.705 | 55.2188 | 142.0 |
| 0.0471 | 11.0 | 8745 | 0.8547 | 57.7898 | 43.4238 | 46.5868 | 56.0858 | 142.0 |
| 0.0336 | 12.0 | 9540 | 0.8658 | 57.2678 | 43.347 | 47.0854 | 55.4167 | 142.0 |

### Framework versions

- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
NTUYG/FG-CodeBERT
7cb0c7083e138c6a1bc3f9af56b47b04456e35b4
2022-05-09T05:12:11.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
false
NTUYG
null
NTUYG/FG-CodeBERT
3
null
transformers
22,366
--- license: apache-2.0 ---
soni69/DialoGPT-medium-holmes
f1d63b374739927a9283121aa5215f622662f77a
2022-05-09T18:54:44.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
soni69
null
soni69/DialoGPT-medium-holmes
3
null
transformers
22,367
--- tags: - conversational ---

# Sherlock Holmes DialoGPT Model
lewtun/test-hub-pr-1
5c498ac40721a0b74f5683d4888f93a6766bbde8
2022-05-23T13:30:02.000Z
[ "pytorch", "bert", "text-classification", "en", "dataset:lewtun/autotrain-data-my-eval-project-615", "transformers", "autotrain", "model-index", "co2_eq_emissions" ]
text-classification
false
lewtun
null
lewtun/test-hub-pr-1
3
null
transformers
22,368
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - lewtun/autotrain-data-my-eval-project-615 co2_eq_emissions: 172.04481351504182 model-index: - name: bhadresh-savani/distilbert-base-uncased-emotion results: - task: name: Multi-class Classification type: text-classification dataset: type: emotion name: Emotion config: default split: test metrics: - name: Loss type: loss value: 0.17404702305793762 - name: Accuracy type: accuracy value: 0.927 - name: Macro F1 type: macro_f1 value: 0.8825061528287809 - name: Recall type: micro_f1 value: 0.927 - name: Weighted F1 type: weighted_f1 value: 0.926876082854655 - name: Macro Precision type: macro_precision value: 0.8880230732280744 - name: Micro Precision type: micro_precision value: 0.927 - name: Weighted Precision type: weighted_precision value: 0.9272902840835793 - name: Macro Recall type: macro_recall value: 0.8790126653780703 - name: Micro Recall type: micro_recall value: 0.927 - name: Weighted Recall type: weighted_recall value: 0.927 ---

# Model Trained Using AutoTrain

- Problem type: Binary Classification
- Model ID: 5694363
- CO2 Emissions (in grams): 172.04481351504182

## Validation Metrics

- Loss: 0.2228243350982666
- Accuracy: 0.9298
- Precision: 0.9434585224927775
- Recall: 0.9144
- AUC: 0.9566112000000001
- F1: 0.9287020109689214

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/lewtun/autotrain-my-eval-project-615-5694363
```

Or Python API:

```
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("lewtun/autotrain-my-eval-project-615-5694363", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("lewtun/autotrain-my-eval-project-615-5694363", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
```
lsb/tironiculum
b932d8d9d3111cc156cee9e7abcd5292266e839e
2022-05-10T22:18:05.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
lsb
null
lsb/tironiculum
3
null
transformers
22,369
Entry not found
ybelkada/opt-125m-debug
ef108af6e570b1b8921d81a058f1fa3d88c4d1a4
2022-05-26T15:39:17.000Z
[ "pytorch", "opt", "feature-extraction", "transformers" ]
feature-extraction
false
ybelkada
null
ybelkada/opt-125m-debug
3
null
transformers
22,370
# OPT-125m debug

Debug model for OPT-125m
Nonegom/roberta_finetune_twice
7786ba15fcb6e4b08da4ddcc729a69ccae2ebc3b
2022-05-09T10:25:20.000Z
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
Nonegom
null
Nonegom/roberta_finetune_twice
3
1
transformers
22,371
Entry not found
princeton-nlp/CoFi-RTE-s60
9d59239167f6572742b23ad1c1b4c28831591663
2022-05-09T15:23:20.000Z
[ "pytorch", "bert", "text-classification", "arxiv:2204.00408", "transformers" ]
text-classification
false
princeton-nlp
null
princeton-nlp/CoFi-RTE-s60
3
null
transformers
22,372
This is a model checkpoint for "[Structured Pruning Learns Compact and Accurate Models](https://arxiv.org/pdf/2204.00408.pdf)". The model is pruned from `bert-base-uncased` to a 60% sparsity on dataset RTE. Please go to [our repository](https://github.com/princeton-nlp/CoFiPruning) for more details on how to use the model for inference. Note that you would have to use the model class specified in our repository to load the model.
princeton-nlp/CoFi-RTE-s96
28013e693b5db0daf32ee30b9a0a35d08dfbaad6
2022-05-09T15:21:16.000Z
[ "pytorch", "bert", "text-classification", "arxiv:2204.00408", "transformers" ]
text-classification
false
princeton-nlp
null
princeton-nlp/CoFi-RTE-s96
3
null
transformers
22,373
This is a model checkpoint for "[Structured Pruning Learns Compact and Accurate Models](https://arxiv.org/pdf/2204.00408.pdf)". The model is pruned from `bert-base-uncased` to a 96% sparsity on dataset RTE. Please go to [our repository](https://github.com/princeton-nlp/CoFiPruning) for more details on how to use the model for inference. Note that you would have to use the model class specified in our repository to load the model.
princeton-nlp/CoFi-CoLA-s60
72e0a5de8d8675335084e44eb1aff7b1104d20f7
2022-05-09T15:23:43.000Z
[ "pytorch", "bert", "text-classification", "arxiv:2204.00408", "transformers" ]
text-classification
false
princeton-nlp
null
princeton-nlp/CoFi-CoLA-s60
3
null
transformers
22,374
This is a model checkpoint for "[Structured Pruning Learns Compact and Accurate Models](https://arxiv.org/pdf/2204.00408.pdf)". The model is pruned from `bert-base-uncased` to a 60% sparsity on dataset CoLA. Please go to [our repository](https://github.com/princeton-nlp/CoFiPruning) for more details on how to use the model for inference. Note that you would have to use the model class specified in our repository to load the model.
kushaljoseph/tiny-bert-sst2-distilled
ce30490503b207c79623122c76c36caa8d670e3a
2022-05-10T05:07:08.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
kushaljoseph
null
kushaljoseph/tiny-bert-sst2-distilled
3
null
transformers
22,375
Entry not found
nithya/project3-model
d2361f58d995f7949d80c6231f1f7d809a81e236
2022-05-09T16:57:17.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
nithya
null
nithya/project3-model
3
null
transformers
22,376
Entry not found
lucifermorninstar011/autotrain-lucifer_multi-844026969
7905fbf47fbf093d3aaa61e06a19594c28cf2a4d
2022-05-09T21:46:40.000Z
[ "pytorch", "distilbert", "text-classification", "en", "dataset:lucifermorninstar011/autotrain-data-lucifer_multi", "transformers", "autotrain", "co2_eq_emissions" ]
text-classification
false
lucifermorninstar011
null
lucifermorninstar011/autotrain-lucifer_multi-844026969
3
null
transformers
22,377
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - lucifermorninstar011/autotrain-data-lucifer_multi co2_eq_emissions: 114.27071200298751 ---

# Model Trained Using AutoTrain

- Problem type: Multi-class Classification
- Model ID: 844026969
- CO2 Emissions (in grams): 114.27071200298751

## Validation Metrics

- Loss: 0.01150986272841692
- Accuracy: 0.99642966866208
- Macro F1: 0.9962909855453217
- Micro F1: 0.99642966866208
- Weighted F1: 0.9964296206983974
- Macro Precision: 0.9963861124818623
- Micro Precision: 0.99642966866208
- Weighted Precision: 0.9964357526967369
- Macro Recall: 0.9962012842304059
- Micro Recall: 0.99642966866208
- Weighted Recall: 0.99642966866208

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/lucifermorninstar011/autotrain-lucifer_multi-844026969
```

Or Python API:

```
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("lucifermorninstar011/autotrain-lucifer_multi-844026969", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("lucifermorninstar011/autotrain-lucifer_multi-844026969", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
```
veronica320/EPC_ADEPT_roberta-l_200
9cc1ed8580cabb6cd780c0c8c4ffedbed24ef003
2022-05-09T21:33:02.000Z
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
false
veronica320
null
veronica320/EPC_ADEPT_roberta-l_200
3
null
transformers
22,378
Entry not found
theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv
b1bbe260e92e27bfe17ddf467a4ce77f404c991b
2022-05-11T04:45:29.000Z
[ "pytorch", "tensorboard", "bart", "text2text-generation", "dataset:scientific_papers", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
theojolliffe
null
theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv
3
null
transformers
22,379
--- license: mit tags: - generated_from_trainer datasets: - scientific_papers metrics: - rouge model-index: - name: bart-cnn-pubmed-arxiv-pubmed-arxiv results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: scientific_papers type: scientific_papers args: arxiv metrics: - name: Rouge1 type: rouge value: 42.1723 ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bart-cnn-pubmed-arxiv-pubmed-arxiv

This model is a fine-tuned version of [theojolliffe/bart-cnn-pubmed-arxiv-pubmed](https://huggingface.co/theojolliffe/bart-cnn-pubmed-arxiv-pubmed) on the scientific_papers dataset.
It achieves the following results on the evaluation set:
- Loss: 2.1382
- Rouge1: 42.1723
- Rouge2: 15.7664
- Rougel: 24.5336
- Rougelsum: 37.7532
- Gen Len: 127.6382

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:|
| 2.125 | 1.0 | 67679 | 2.1382 | 42.1723 | 15.7664 | 24.5336 | 37.7532 | 127.6382 |

### Framework versions

- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
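The card above reports scores but shows no inference call. Below is a minimal sketch, not taken from the original card, assuming the standard `transformers` summarization pipeline; the input text and generation lengths are placeholders.

```python
# Illustrative usage sketch only; generation parameters are assumptions, not from the card.
from transformers import pipeline

summarizer = pipeline(
    "summarization",
    model="theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv",
)

article = "Replace this placeholder with the scientific article text you want to summarize."
summary = summarizer(article, max_length=142, min_length=30, do_sample=False)
print(summary[0]["summary_text"])
```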
nepp1d0/prot_bert_classification_finetuned_no_finetune
5fcd13075e5a5e75790438933059c69a9c747282
2022-05-10T12:27:27.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
nepp1d0
null
nepp1d0/prot_bert_classification_finetuned_no_finetune
3
null
transformers
22,380
--- tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: prot_bert_classification_finetuned_no_finetune results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# prot_bert_classification_finetuned_no_finetune

This model is a fine-tuned version of [Rostlab/prot_bert](https://huggingface.co/Rostlab/prot_bert) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6212
- Accuracy: 0.6473
- F1: 0.6623
- Precision: 0.6201
- Recall: 0.7107

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-06
- train_batch_size: 1
- eval_batch_size: 1
- seed: 3
- gradient_accumulation_steps: 4
- total_train_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 5
- num_epochs: 6
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:---------:|:------:|
| 0.6494 | 1.0 | 3332 | 0.6479 | 0.6439 | 0.6679 | 0.6116 | 0.7357 |
| 0.5357 | 2.0 | 6664 | 0.6440 | 0.6148 | 0.6459 | 0.5845 | 0.7218 |
| 0.4661 | 3.0 | 9996 | 0.6265 | 0.6283 | 0.6414 | 0.6047 | 0.6829 |
| 0.506 | 4.0 | 13328 | 0.6192 | 0.6439 | 0.6567 | 0.6187 | 0.6996 |
| 0.4204 | 5.0 | 16660 | 0.6122 | 0.6567 | 0.6752 | 0.6259 | 0.7330 |
| 0.6071 | 6.0 | 19992 | 0.6212 | 0.6473 | 0.6623 | 0.6201 | 0.7107 |

### Framework versions

- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
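The card gives metrics only, so here is a hedged inference sketch that is not part of the original card. It assumes the usual `transformers` sequence-classification API and follows the Rostlab ProtBert convention of upper-case, space-separated amino acids; the example sequence is arbitrary and the meaning of the output classes is not documented in the card.

```python
# Illustrative sketch only; the sequence is a placeholder and label semantics are unknown.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "nepp1d0/prot_bert_classification_finetuned_no_finetune"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# Placeholder protein fragment, space-separated as ProtBert-style tokenizers expect.
sequence = "M K T A Y I A K Q R Q I S F V K S H F S R Q L E E R L G L I E V Q"
inputs = tokenizer(sequence, return_tensors="pt")

with torch.no_grad():
    probs = torch.softmax(model(**inputs).logits, dim=-1)
print(probs)  # class meanings are not specified in the card
```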
ofirzaf/bert-large-uncased-mnli
58ce3f764986a701dab1f30dfc6f63663ddc453f
2022-05-09T23:58:32.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
ofirzaf
null
ofirzaf/bert-large-uncased-mnli
3
null
transformers
22,381
Entry not found
masakhane/m2m100_418M_pcm_en_news
ebad1cc7a87bb830025242a899da764d4df57e84
2022-05-10T11:47:56.000Z
[ "pytorch", "m2m_100", "text2text-generation", "transformers", "license:afl-3.0", "autotrain_compatible" ]
text2text-generation
false
masakhane
null
masakhane/m2m100_418M_pcm_en_news
3
null
transformers
22,382
--- license: afl-3.0 ---
nielsr/pix2seq-base
1294a0b6b3e281c2cee678c1815c11ff7e1fc297
2022-05-10T10:13:20.000Z
[ "pytorch", "pix2seq", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
nielsr
null
nielsr/pix2seq-base
3
null
transformers
22,383
Entry not found
SreyanG-NVIDIA/bert-base-uncased-finetuned-squad
b9edb783c8346ea3e7915ed0989db8dc98cfd1f8
2022-05-10T12:54:15.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
SreyanG-NVIDIA
null
SreyanG-NVIDIA/bert-base-uncased-finetuned-squad
3
null
transformers
22,384
Entry not found
moshew/MiniLM-L12-clinc-distilled
b2bb28600332d59a58440afcce6a605a4517f8eb
2022-05-10T19:18:00.000Z
[ "pytorch", "tensorboard", "roberta", "text-classification", "transformers" ]
text-classification
false
moshew
null
moshew/MiniLM-L12-clinc-distilled
3
null
transformers
22,385
Entry not found
ceggian/bert_post_trained_reddit_batch128
2e47ad8dac3381e57e3d746d157833a186a3ff25
2022-05-11T06:21:58.000Z
[ "pytorch", "bert", "pretraining", "transformers" ]
null
false
ceggian
null
ceggian/bert_post_trained_reddit_batch128
3
null
transformers
22,386
Entry not found
ceggian/sbert_standard_reddit_softmax
a92f8e96687ad9a6b7c4e8d81e4b41b1b40a6796
2022-05-11T06:49:38.000Z
[ "pytorch", "bert", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
ceggian
null
ceggian/sbert_standard_reddit_softmax
3
null
sentence-transformers
22,387
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers ---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 117759 with parameters:

```
{'batch_size': 8, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.SoftmaxLoss.SoftmaxLoss`

Parameters of the fit()-Method:

```
{
    "epochs": 1,
    "evaluation_steps": 0,
    "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 11775,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
ceggian/sbert_pt_reddit_mnr_512
f7d39c12f7631a9d456686812ee04a74993e2791
2022-05-11T13:33:48.000Z
[ "pytorch", "bert", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
ceggian
null
ceggian/sbert_pt_reddit_mnr_512
3
1
sentence-transformers
22,388
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers ---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

## Training

The model was trained with the parameters:

**DataLoader**:

`sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 39289 with parameters:

```
{'batch_size': 8}
```

**Loss**:

`sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:

```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```

Parameters of the fit()-Method:

```
{
    "epochs": 1,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 3928,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
orenpereg/paraphrase-mpnet-base-v2_sst2_64samps
8789633f4d275ddc1ced554042aa384c734890b7
2022-05-11T13:40:33.000Z
[ "pytorch", "mpnet", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
orenpereg
null
orenpereg/paraphrase-mpnet-base-v2_sst2_64samps
3
null
sentence-transformers
22,389
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers ---

# orenpereg/paraphrase-mpnet-base-v2_sst2_64samps

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('orenpereg/paraphrase-mpnet-base-v2_sst2_64samps')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('orenpereg/paraphrase-mpnet-base-v2_sst2_64samps')
model = AutoModel.from_pretrained('orenpereg/paraphrase-mpnet-base-v2_sst2_64samps')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=orenpereg/paraphrase-mpnet-base-v2_sst2_64samps)

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 80 with parameters:

```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the fit()-Method:

```
{
    "epochs": 1,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 3,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
danieleV9H/hubert-base-timit-demo-google-colab-ft30ep_v4
66ff2e38a2fe2886a58d4d9396fa8f21646160cd
2022-05-14T10:32:13.000Z
[ "pytorch", "tensorboard", "hubert", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
danieleV9H
null
danieleV9H/hubert-base-timit-demo-google-colab-ft30ep_v4
3
null
transformers
22,390
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: hubert-base-timit-demo-google-colab-ft30ep_v4 results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# hubert-base-timit-demo-google-colab-ft35ep

This model is a fine-tuned version of [facebook/hubert-base-ls960](https://huggingface.co/facebook/hubert-base-ls960) on the timit-asr dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4602
- Wer: 0.3466

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 3.825 | 0.87 | 500 | 2.9521 | 1.0 |
| 2.431 | 1.73 | 1000 | 0.9760 | 0.8013 |
| 1.0089 | 2.6 | 1500 | 0.5934 | 0.5968 |
| 0.6859 | 3.46 | 2000 | 0.5132 | 0.5356 |
| 0.5302 | 4.33 | 2500 | 0.4506 | 0.4894 |
| 0.44 | 5.19 | 3000 | 0.4340 | 0.4670 |
| 0.3926 | 6.06 | 3500 | 0.4506 | 0.4528 |
| 0.3326 | 6.92 | 4000 | 0.4197 | 0.4486 |
| 0.2937 | 7.79 | 4500 | 0.4093 | 0.4193 |
| 0.2568 | 8.65 | 5000 | 0.4098 | 0.4229 |
| 0.2473 | 9.52 | 5500 | 0.4090 | 0.4141 |
| 0.2233 | 10.38 | 6000 | 0.4152 | 0.4125 |
| 0.2108 | 11.25 | 6500 | 0.4586 | 0.4189 |
| 0.2086 | 12.11 | 7000 | 0.4284 | 0.3969 |
| 0.1858 | 12.98 | 7500 | 0.4028 | 0.3946 |
| 0.1641 | 13.84 | 8000 | 0.4679 | 0.4002 |
| 0.1686 | 14.71 | 8500 | 0.4441 | 0.3936 |
| 0.1489 | 15.57 | 9000 | 0.4897 | 0.3828 |
| 0.1541 | 16.44 | 9500 | 0.4953 | 0.3783 |
| 0.1417 | 17.3 | 10000 | 0.4500 | 0.3758 |
| 0.1428 | 18.17 | 10500 | 0.4533 | 0.3796 |
| 0.1306 | 19.03 | 11000 | 0.4474 | 0.3792 |
| 0.1185 | 19.9 | 11500 | 0.4762 | 0.3743 |
| 0.1081 | 20.76 | 12000 | 0.4770 | 0.3699 |
| 0.1253 | 21.63 | 12500 | 0.4749 | 0.3629 |
| 0.1087 | 22.49 | 13000 | 0.4577 | 0.3534 |
| 0.1172 | 23.36 | 13500 | 0.4819 | 0.3525 |
| 0.1086 | 24.22 | 14000 | 0.4709 | 0.3623 |
| 0.089 | 25.09 | 14500 | 0.4852 | 0.3544 |
| 0.086 | 25.95 | 15000 | 0.4602 | 0.3555 |
| 0.086 | 26.82 | 15500 | 0.4861 | 0.3497 |
| 0.086 | 27.68 | 16000 | 0.4527 | 0.3473 |
| 0.0919 | 28.55 | 16500 | 0.4607 | 0.3487 |
| 0.0792 | 29.41 | 17000 | 0.4602 | 0.3466 |

### Framework versions

- Transformers 4.17.0
- Pytorch 1.11.0+cu113
- Datasets 1.18.3
- Tokenizers 0.12.1
Matthijs/mobilevit-xx-small
850dc69265907c2019d8a67293a01dd2b31baecd
2022-05-11T14:42:38.000Z
[ "pytorch", "mobilevit", "image-classification", "transformers" ]
image-classification
false
Matthijs
null
Matthijs/mobilevit-xx-small
3
null
transformers
22,391
Entry not found
PSW/low_resource_percent20_min2swap_seed1
f179846ea51f16e398b22ca3f63bb7f5cf6126ab
2022-05-12T08:54:14.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent20_min2swap_seed1
3
null
transformers
22,392
Entry not found
PSW/low_resource_percent20_min2swap_seed42
2f0b55efc1bc6d4989fcaacc1a4e6d37bea24c09
2022-05-12T09:35:55.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent20_min2swap_seed42
3
null
transformers
22,393
Entry not found
PSW/low_resource_percent20_max2swap_seed27
dcd9985851a7ddecdd2ab1a9888a022b2ce33254
2022-05-12T10:11:07.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/low_resource_percent20_max2swap_seed27
3
null
transformers
22,394
Entry not found
monsoon-nlp/czech-movie-rating
ede5a7485caa91dc8803fb9f436515c4e5aaabff
2022-05-11T22:03:48.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
monsoon-nlp
null
monsoon-nlp/czech-movie-rating
3
null
transformers
22,395
Entry not found
eslamxm/mt5-base-finetuned-urdu-arabic
aec9b30c6dbae689fa9bc7f59f07e78e215a0443
2022-05-12T09:18:16.000Z
[ "pytorch", "mt5", "text2text-generation", "dataset:xlsum", "transformers", "summarization", "arabic", "ar", "Abstractive Summarization", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
summarization
false
eslamxm
null
eslamxm/mt5-base-finetuned-urdu-arabic
3
null
transformers
22,396
--- license: apache-2.0 tags: - summarization - arabic - ar - mt5 - Abstractive Summarization - generated_from_trainer datasets: - xlsum model-index: - name: mt5-base-finetuned-urdu-finetuned-urdu-arabic results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mt5-base-finetuned-urdu-finetuned-urdu-arabic

This model is a fine-tuned version of [eslamxm/mt5-base-finetuned-urdu](https://huggingface.co/eslamxm/mt5-base-finetuned-urdu) on the xlsum dataset.
It achieves the following results on the evaluation set:
- Loss: 3.3744
- Rouge-1: 22.77
- Rouge-2: 10.15
- Rouge-l: 20.71
- Gen Len: 19.0
- Bertscore: 71.46

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0005
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
- label_smoothing_factor: 0.1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge-1 | Rouge-2 | Rouge-l | Gen Len | Bertscore |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:-------:|:---------:|
| 4.5155 | 1.0 | 1172 | 3.6895 | 18.81 | 6.77 | 17.01 | 19.0 | 70.27 |
| 3.8315 | 2.0 | 2344 | 3.5047 | 19.75 | 7.79 | 17.95 | 19.0 | 70.58 |
| 3.6122 | 3.0 | 3516 | 3.4231 | 20.46 | 8.44 | 18.7 | 19.0 | 70.8 |
| 3.4735 | 4.0 | 4688 | 3.3835 | 21.12 | 8.86 | 19.21 | 19.0 | 70.98 |
| 3.3855 | 5.0 | 5860 | 3.3744 | 21.48 | 9.01 | 19.57 | 19.0 | 71.17 |

### Framework versions

- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.1
- Tokenizers 0.12.1
enoriega/kw_pubmed_10000_0.000006
9ea766f9637655504b9179d18f346ae9e712c4c8
2022-05-12T14:25:29.000Z
[ "pytorch", "tensorboard", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
enoriega
null
enoriega/kw_pubmed_10000_0.000006
3
null
transformers
22,397
Entry not found
reallycarlaost/emobert-single-binary
331deac5cc3c8fe0c47f5c1915857b18dd3319e3
2022-05-12T13:37:59.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
reallycarlaost
null
reallycarlaost/emobert-single-binary
3
null
transformers
22,398
Entry not found
manthan40/wav2vec2-base-finetuned-manthan_base
e90b01ca221f3baf37144f572f45ab4430770521
2022-05-13T01:39:46.000Z
[ "pytorch", "tensorboard", "wav2vec2", "audio-classification", "dataset:new_dataset", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
audio-classification
false
manthan40
null
manthan40/wav2vec2-base-finetuned-manthan_base
3
null
transformers
22,399
--- license: apache-2.0 tags: - generated_from_trainer datasets: - new_dataset metrics: - accuracy model-index: - name: wav2vec2-base-finetuned-manthan_base results: [] ---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-base-finetuned-manthan_base

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the new_dataset dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2246
- Accuracy: 0.9691

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 2.4725 | 0.98 | 12 | 2.4222 | 0.1057 |
| 2.4501 | 1.98 | 24 | 2.2420 | 0.2784 |
| 2.2977 | 2.98 | 36 | 2.0155 | 0.7603 |
| 2.1331 | 3.98 | 48 | 1.8193 | 0.8582 |
| 1.7927 | 4.98 | 60 | 1.6376 | 0.9459 |
| 1.7226 | 5.98 | 72 | 1.4940 | 0.9613 |
| 1.6036 | 6.98 | 84 | 1.3632 | 0.9665 |
| 1.5181 | 7.98 | 96 | 1.2963 | 0.9562 |
| 1.4384 | 8.98 | 108 | 1.2406 | 0.9742 |
| 1.3339 | 9.98 | 120 | 1.2246 | 0.9691 |

### Framework versions

- Transformers 4.19.0
- Pytorch 1.11.0+cu113
- Datasets 1.14.0
- Tokenizers 0.12.1
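The card above does not show how to run the classifier. The following is a minimal sketch, not from the original card, assuming the generic `transformers` audio-classification pipeline; the audio file name is a placeholder, and the label set depends on whatever the unspecified `new_dataset` contained.

```python
# Illustrative usage sketch only; "clip.wav" and the printed labels are placeholders.
from transformers import pipeline

classifier = pipeline(
    "audio-classification",
    model="manthan40/wav2vec2-base-finetuned-manthan_base",
)

for prediction in classifier("clip.wav"):
    print(prediction["label"], round(prediction["score"], 3))
```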