| Column | Dtype | Length / range |
|---|---|---|
| `modelId` | string | length 4-112 |
| `sha` | string | length 40 |
| `lastModified` | string | length 24 |
| `tags` | sequence | n/a |
| `pipeline_tag` | string | 29 classes |
| `private` | bool | 1 class |
| `author` | string | length 2-38 |
| `config` | null | n/a |
| `id` | string | length 4-112 |
| `downloads` | float64 | 0-36.8M |
| `likes` | float64 | 0-712 |
| `library_name` | string | 17 classes |
| `__index_level_0__` | int64 | 0-38.5k |
| `readme` | string | length 0-186k |
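The records that follow are easiest to explore programmatically. A minimal pandas sketch, under the assumption that the dump is available as a JSON Lines export (the filename `models_metadata.jsonl` is a placeholder, not part of this document):

```python
# Hedged sketch: load the model-metadata dump for exploration.
# "models_metadata.jsonl" is a placeholder; point it at the actual export.
import pandas as pd

df = pd.read_json("models_metadata.jsonl", lines=True)
print(df[["modelId", "pipeline_tag", "library_name", "downloads", "likes"]].head())
print(df["pipeline_tag"].value_counts())  # up to 29 distinct tasks per the schema above
```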
CEBaB/roberta-base.CEBaB.causalm.service__food.3-class.exclusive.seed_45
558ee8833fdd9181e49f821db4ba24cd2403cc86
2022-05-24T10:07:40.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.service__food.3-class.exclusive.seed_45
1
null
transformers
32,400
Entry not found
CEBaB/roberta-base.CEBaB.causalm.service__food.3-class.exclusive.seed_46
a15995c2823dd9d9f60860a6183475459f16ff97
2022-05-24T10:07:42.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.service__food.3-class.exclusive.seed_46
1
null
transformers
32,401
Entry not found
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_42
221ac44799e9fb8218890a47a2824bf8796fb229
2022-05-24T10:10:24.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_42
1
null
transformers
32,402
Entry not found
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_43
350b4fa79d7f6d826b6293f81d089c1a1c970827
2022-05-24T10:10:26.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_43
1
null
transformers
32,403
Entry not found
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_44
516048f8d31a4b30007c6f84dad0577b4761e87a
2022-05-24T10:10:28.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_44
1
null
transformers
32,404
Entry not found
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_45
5b4362a7677ade068db7a92a5c2ff7e2fd16cb78
2022-05-24T10:10:31.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_45
1
null
transformers
32,405
Entry not found
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_46
e2a5dc65aa64e6939ee7f1d114ce3a7e1c407369
2022-05-24T10:10:33.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.ambiance__food.5-class.exclusive.seed_46
1
null
transformers
32,406
Entry not found
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_42
399bb878b13786b11e71209db154364fefc44f5d
2022-05-24T10:10:35.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_42
1
null
transformers
32,407
Entry not found
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_43
e84014534fba154036e747ebf8a8e4906b975f2b
2022-05-24T10:10:37.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_43
1
null
transformers
32,408
Entry not found
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_44
c35073e961f95a8e097dae77347a711a04d60096
2022-05-24T10:10:39.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_44
1
null
transformers
32,409
Entry not found
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_45
cfcd1341eb7955e5d12d73b3f95d3a537b5304a7
2022-05-24T10:10:41.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_45
1
null
transformers
32,410
Entry not found
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_46
fae4afe5aaa26625cc3f93ed93e746aae83bd450
2022-05-24T10:10:43.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.food__service.5-class.exclusive.seed_46
1
null
transformers
32,411
Entry not found
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_42
4da21f2648b49da2025ea5fe6ddf27e9e137070e
2022-05-24T10:10:45.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_42
1
null
transformers
32,412
Entry not found
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_43
1f39ca0537e3bf71e5b04e89442185804daed291
2022-05-24T10:10:47.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_43
1
null
transformers
32,413
Entry not found
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_44
084821a15c7d27d050ffef415884336958724c00
2022-05-24T10:10:48.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_44
1
null
transformers
32,414
Entry not found
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_45
5c2de4c88cbfdf1271c1de2f36c48ec3ad100670
2022-05-24T10:10:50.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_45
1
null
transformers
32,415
Entry not found
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_46
221c1af230600ed51d109402476051c175b81d14
2022-05-24T10:10:52.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.noise__food.5-class.exclusive.seed_46
1
null
transformers
32,416
Entry not found
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_42
7eeec62a88aa868db3fc0e312230a059fc5eb474
2022-05-24T10:10:54.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_42
1
null
transformers
32,417
Entry not found
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_43
646efc74f7294a70612f3a276953e93a8532bc2a
2022-05-24T10:10:56.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_43
1
null
transformers
32,418
Entry not found
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_44
05b55387ce981b0d4f05c11eb61e5094f0c347f1
2022-05-24T10:10:58.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_44
1
null
transformers
32,419
Entry not found
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_45
3c127c5194e3cc204078acc7a6d4949e663ca7cb
2022-05-24T10:11:00.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_45
1
null
transformers
32,420
Entry not found
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_46
f4d5558ef0d032b4761c275bbe16fc3a24d34485
2022-05-24T10:11:02.000Z
[ "pytorch", "roberta_causalm", "transformers" ]
null
false
CEBaB
null
CEBaB/roberta-base.CEBaB.causalm.service__food.5-class.exclusive.seed_46
1
null
transformers
32,421
Entry not found
Vkt/victor-hg-ptbr-2.0
fe52be10b1e9eda9c46715e851d213c63a31cb2d
2022-05-26T04:10:53.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Vkt
null
Vkt/victor-hg-ptbr-2.0
1
null
transformers
32,422
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: victor-hg-ptbr-2.0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # victor-hg-ptbr-2.0 This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.0240 - Wer: 0.0219 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 4.4069 | 0.21 | 400 | 1.1372 | 0.9140 | | 0.8079 | 0.43 | 800 | 0.5822 | 0.5339 | | 0.5821 | 0.64 | 1200 | 0.4226 | 0.4177 | | 0.5159 | 0.86 | 1600 | 0.4074 | 0.3970 | | 0.4484 | 1.07 | 2000 | 0.3144 | 0.3220 | | 0.3937 | 1.29 | 2400 | 0.3160 | 0.3264 | | 0.3911 | 1.5 | 2800 | 0.2863 | 0.2956 | | 0.3761 | 1.71 | 3200 | 0.3029 | 0.3128 | | 0.3722 | 1.93 | 3600 | 0.2771 | 0.2933 | | 0.3193 | 2.14 | 4000 | 0.2603 | 0.2795 | | 0.3013 | 2.36 | 4400 | 0.2682 | 0.2703 | | 0.3039 | 2.57 | 4800 | 0.2630 | 0.2618 | | 0.3133 | 2.79 | 5200 | 0.2578 | 0.2629 | | 0.3173 | 3.0 | 5600 | 0.2640 | 0.2746 | | 0.2521 | 3.22 | 6000 | 0.2797 | 0.2662 | | 0.2654 | 3.43 | 6400 | 0.2762 | 0.2640 | | 0.2586 | 3.64 | 6800 | 0.2642 | 0.2596 | | 0.265 | 3.86 | 7200 | 0.2656 | 0.2794 | | 0.2432 | 4.07 | 7600 | 0.2459 | 0.2497 | | 0.226 | 4.29 | 8000 | 0.2533 | 0.2509 | | 0.2385 | 4.5 | 8400 | 0.2332 | 0.2394 | | 0.2332 | 4.72 | 8800 | 0.2500 | 0.2569 | | 0.2358 | 4.93 | 9200 | 0.2384 | 0.2489 | | 0.2169 | 5.14 | 9600 | 0.2410 | 0.2380 | | 0.2038 | 5.36 | 10000 | 0.2426 | 0.2333 | | 0.2109 | 5.57 | 10400 | 0.2480 | 0.2473 | | 0.2147 | 5.79 | 10800 | 0.2341 | 0.2272 | | 0.2153 | 6.0 | 11200 | 0.2402 | 0.2424 | | 0.186 | 6.22 | 11600 | 0.2560 | 0.2489 | | 0.1854 | 6.43 | 12000 | 0.2444 | 0.2402 | | 0.1915 | 6.65 | 12400 | 0.2720 | 0.2531 | | 0.1929 | 6.86 | 12800 | 0.2516 | 0.2342 | | 0.1842 | 7.07 | 13200 | 0.2480 | 0.2304 | | 0.1682 | 7.29 | 13600 | 0.2393 | 0.2276 | | 0.1753 | 7.5 | 14000 | 0.2514 | 0.2263 | | 0.1798 | 7.72 | 14400 | 0.2191 | 0.2178 | | 0.1736 | 7.93 | 14800 | 0.2351 | 0.2197 | | 0.1668 | 8.15 | 15200 | 0.2315 | 0.2194 | | 0.1545 | 8.36 | 15600 | 0.2291 | 0.2079 | | 0.1508 | 8.57 | 16000 | 0.2351 | 0.2134 | | 0.1662 | 8.79 | 16400 | 0.2298 | 0.2197 | | 0.1621 | 9.0 | 16800 | 0.2314 | 0.2219 | | 0.1416 | 9.22 | 17200 | 0.2306 | 0.2192 | | 0.1455 | 9.43 | 17600 | 0.2466 | 0.2184 | | 0.1522 | 9.65 | 18000 | 0.2392 | 0.2255 | | 0.1434 | 9.86 | 18400 | 0.2464 | 0.2208 | | 0.1362 | 10.08 | 18800 | 0.2351 | 0.2095 | | 0.127 | 10.29 | 19200 | 0.2373 | 0.2110 | | 0.133 | 10.5 | 19600 | 0.2269 | 0.2031 | | 0.1308 | 10.72 | 20000 | 0.2400 | 0.2096 | | 0.1331 | 10.93 | 20400 | 0.2243 | 0.2083 | | 0.125 | 11.15 | 20800 | 0.2334 | 0.2063 | 
| 0.1236 | 11.36 | 21200 | 0.2195 | 0.2044 | | 0.1263 | 11.58 | 21600 | 0.2263 | 0.2050 | | 0.1235 | 11.79 | 22000 | 0.2217 | 0.2087 | | 0.1301 | 12.0 | 22400 | 0.2332 | 0.2094 | | 0.1123 | 12.22 | 22800 | 0.2195 | 0.2068 | | 0.117 | 12.43 | 23200 | 0.2266 | 0.2110 | | 0.1156 | 12.65 | 23600 | 0.2469 | 0.2063 | | 0.1117 | 12.86 | 24000 | 0.2379 | 0.2035 | | 0.1124 | 13.08 | 24400 | 0.2156 | 0.1963 | | 0.106 | 13.29 | 24800 | 0.2310 | 0.1988 | | 0.1066 | 13.5 | 25200 | 0.2334 | 0.1950 | | 0.1069 | 13.72 | 25600 | 0.2230 | 0.2011 | | 0.1089 | 13.93 | 26000 | 0.2233 | 0.2003 | | 0.0977 | 14.15 | 26400 | 0.2273 | 0.1895 | | 0.0972 | 14.36 | 26800 | 0.2265 | 0.1887 | | 0.1005 | 14.58 | 27200 | 0.2196 | 0.1934 | | 0.1058 | 14.79 | 27600 | 0.2213 | 0.1870 | | 0.1027 | 15.01 | 28000 | 0.2361 | 0.1916 | | 0.0886 | 15.22 | 28400 | 0.2275 | 0.1815 | | 0.0885 | 15.43 | 28800 | 0.2230 | 0.1891 | | 0.0911 | 15.65 | 29200 | 0.2237 | 0.1989 | | 0.0923 | 15.86 | 29600 | 0.2200 | 0.1857 | | 0.0868 | 16.08 | 30000 | 0.2248 | 0.1875 | | 0.0812 | 16.29 | 30400 | 0.2240 | 0.1874 | | 0.0829 | 16.51 | 30800 | 0.2198 | 0.1814 | | 0.0832 | 16.72 | 31200 | 0.2328 | 0.1892 | | 0.0822 | 16.93 | 31600 | 0.2283 | 0.1862 | | 0.0828 | 17.15 | 32000 | 0.2283 | 0.1806 | | 0.0791 | 17.36 | 32400 | 0.2197 | 0.1787 | | 0.0801 | 17.58 | 32800 | 0.2249 | 0.1815 | | 0.0804 | 17.79 | 33200 | 0.2304 | 0.1789 | | 0.0833 | 18.01 | 33600 | 0.2235 | 0.1832 | | 0.0762 | 18.22 | 34000 | 0.2358 | 0.1784 | | 0.0688 | 18.44 | 34400 | 0.2183 | 0.1758 | | 0.0751 | 18.65 | 34800 | 0.2169 | 0.1805 | | 0.0729 | 18.86 | 35200 | 0.2296 | 0.1770 | | 0.0681 | 19.08 | 35600 | 0.2380 | 0.1770 | | 0.067 | 19.29 | 36000 | 0.2153 | 0.1777 | | 0.0669 | 19.51 | 36400 | 0.2260 | 0.1742 | | 0.0824 | 19.72 | 36800 | 0.0289 | 0.0310 | | 0.0857 | 19.94 | 37200 | 0.0289 | 0.0322 | | 0.0799 | 20.15 | 37600 | 0.0264 | 0.0298 | | 0.0767 | 20.36 | 38000 | 0.0273 | 0.0318 | | 0.079 | 20.58 | 38400 | 0.0274 | 0.0320 | | 0.0791 | 20.79 | 38800 | 0.0279 | 0.0318 | | 0.0805 | 21.01 | 39200 | 0.0285 | 0.0330 | | 0.0622 | 21.22 | 39600 | 0.0263 | 0.0306 | | 0.0622 | 21.44 | 40000 | 0.0290 | 0.0318 | | 0.0672 | 21.65 | 40400 | 0.0278 | 0.0330 | | 0.0706 | 21.86 | 40800 | 0.0270 | 0.0297 | | 0.0619 | 22.08 | 41200 | 0.0288 | 0.0328 | | 0.0633 | 22.29 | 41600 | 0.0256 | 0.0303 | | 0.0618 | 22.51 | 42000 | 0.0263 | 0.0299 | | 0.0576 | 22.72 | 42400 | 0.0273 | 0.0301 | | 0.0583 | 22.94 | 42800 | 0.0282 | 0.0297 | | 0.0565 | 23.15 | 43200 | 0.0256 | 0.0280 | | 0.0557 | 23.37 | 43600 | 0.0268 | 0.0280 | | 0.0548 | 23.58 | 44000 | 0.0266 | 0.0291 | | 0.056 | 23.79 | 44400 | 0.0264 | 0.0290 | | 0.0546 | 24.01 | 44800 | 0.0273 | 0.0284 | | 0.0496 | 24.22 | 45200 | 0.0261 | 0.0279 | | 0.0512 | 24.44 | 45600 | 0.0256 | 0.0281 | | 0.0482 | 24.65 | 46000 | 0.0264 | 0.0285 | | 0.0503 | 24.87 | 46400 | 0.0256 | 0.0268 | | 0.0471 | 25.08 | 46800 | 0.0270 | 0.0282 | | 0.0453 | 25.29 | 47200 | 0.0255 | 0.0267 | | 0.0431 | 25.51 | 47600 | 0.0251 | 0.0264 | | 0.0464 | 25.72 | 48000 | 0.0262 | 0.0261 | | 0.0431 | 25.94 | 48400 | 0.0257 | 0.0265 | | 0.0405 | 26.15 | 48800 | 0.0260 | 0.0251 | | 0.0406 | 26.37 | 49200 | 0.0246 | 0.0250 | | 0.0397 | 26.58 | 49600 | 0.0252 | 0.0254 | | 0.0403 | 26.8 | 50000 | 0.0250 | 0.0256 | | 0.0385 | 27.01 | 50400 | 0.0254 | 0.0241 | | 0.0398 | 27.22 | 50800 | 0.0255 | 0.0242 | | 0.0363 | 27.44 | 51200 | 0.0250 | 0.0236 | | 0.0372 | 27.65 | 51600 | 0.0247 | 0.0232 | | 0.0362 | 27.87 | 52000 | 0.0240 | 0.0226 | | 0.0367 | 28.08 | 52400 | 0.0246 | 0.0224 | | 
0.0347 | 28.3 | 52800 | 0.0247 | 0.0229 | | 0.0348 | 28.51 | 53200 | 0.0241 | 0.0229 | | 0.0331 | 28.72 | 53600 | 0.0242 | 0.0224 | | 0.0339 | 28.94 | 54000 | 0.0241 | 0.0220 | | 0.0336 | 29.15 | 54400 | 0.0244 | 0.0221 | | 0.0336 | 29.37 | 54800 | 0.0243 | 0.0215 | | 0.0349 | 29.58 | 55200 | 0.0239 | 0.0217 | | 0.0308 | 29.8 | 55600 | 0.0240 | 0.0219 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.8.1+cu111 - Datasets 2.2.1 - Tokenizers 0.12.1
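The card above stops at training details, so a minimal inference sketch may help. It assumes the checkpoint loads with the standard transformers ASR pipeline and that `sample.wav` is any 16 kHz mono recording; both are assumptions, not statements from the card:

```python
# Minimal sketch: transcribe Portuguese speech with the fine-tuned checkpoint.
# "sample.wav" is a placeholder audio path.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="Vkt/victor-hg-ptbr-2.0")
print(asr("sample.wav")["text"])
```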
NabilOulbaz/bertweet_retrained_semEval2019
f7822d0a52beb47dd71eb0fa430c8797778b8769
2022-05-24T13:31:09.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
NabilOulbaz
null
NabilOulbaz/bertweet_retrained_semEval2019
1
null
transformers
32,423
Entry not found
ismail-lucifer011/autotrain-name_all-904029569
0d9a45f404f8c8a09b0054899aa3a13add72ba39
2022-05-24T14:42:21.000Z
[ "pytorch", "distilbert", "token-classification", "en", "dataset:ismail-lucifer011/autotrain-data-name_all", "transformers", "autotrain", "co2_eq_emissions", "autotrain_compatible" ]
token-classification
false
ismail-lucifer011
null
ismail-lucifer011/autotrain-name_all-904029569
1
null
transformers
32,424
--- tags: autotrain language: en widget: - text: "I love AutoTrain 🤗" datasets: - ismail-lucifer011/autotrain-data-name_all co2_eq_emissions: 0.527083766435658 --- # Model Trained Using AutoTrain - Problem type: Entity Extraction - Model ID: 904029569 - CO2 Emissions (in grams): 0.527083766435658 ## Validation Metrics - Loss: 0.0036354903131723404 - Accuracy: 0.9989951257999512 - Precision: 0.9888963290924173 - Recall: 0.9934437092741895 - F1: 0.9911648034619546 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/ismail-lucifer011/autotrain-name_all-904029569 ``` Or Python API: ``` from transformers import AutoModelForTokenClassification, AutoTokenizer model = AutoModelForTokenClassification.from_pretrained("ismail-lucifer011/autotrain-name_all-904029569", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("ismail-lucifer011/autotrain-name_all-904029569", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
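The card's Python snippet ends at the raw forward pass. A hedged continuation showing how token-classification logits are typically turned into entity labels; it reuses `model`, `tokenizer`, and `inputs` from the card's example and assumes the checkpoint exposes the usual `id2label` mapping in its config:

```python
# Sketch: map the token-classification logits to entity labels.
import torch

with torch.no_grad():
    logits = model(**inputs).logits          # shape: (batch, seq_len, num_labels)
predictions = logits.argmax(dim=-1)[0]
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, label_id in zip(tokens, predictions):
    print(f"{token:>12} -> {model.config.id2label[int(label_id)]}")
```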
AntoDono/DialoGPT-Bopy-Patch2
919713b62eaebf247231d52e5fc87f1186c6ac58
2022-05-24T20:15:40.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
AntoDono
null
AntoDono/DialoGPT-Bopy-Patch2
1
null
transformers
32,425
Entry not found
anablasi/model_10k_qa
66d343f774a97a496d0a7ae6c66baa2239f39753
2022-05-24T22:04:29.000Z
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
anablasi
null
anablasi/model_10k_qa
1
null
transformers
32,426
Entry not found
chrisvinsen/xlsr-wav2vec2-2
2355b754056a85f5a636d0a76d413ceaac788784
2022-05-25T10:21:44.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
chrisvinsen
null
chrisvinsen/xlsr-wav2vec2-2
1
null
transformers
32,427
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: xlsr-wav2vec2-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlsr-wav2vec2-2 This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5884 - Wer: 0.4301 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 800 - num_epochs: 60 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 6.6058 | 1.38 | 400 | 3.1894 | 1.0 | | 2.3145 | 2.76 | 800 | 0.7193 | 0.7976 | | 0.6737 | 4.14 | 1200 | 0.5338 | 0.6056 | | 0.4651 | 5.52 | 1600 | 0.5699 | 0.6007 | | 0.3968 | 6.9 | 2000 | 0.4608 | 0.5221 | | 0.3281 | 8.28 | 2400 | 0.5264 | 0.5209 | | 0.2937 | 9.65 | 2800 | 0.5366 | 0.5096 | | 0.2619 | 11.03 | 3200 | 0.4902 | 0.5021 | | 0.2394 | 12.41 | 3600 | 0.4706 | 0.4908 | | 0.2139 | 13.79 | 4000 | 0.5526 | 0.4871 | | 0.2034 | 15.17 | 4400 | 0.5396 | 0.5108 | | 0.1946 | 16.55 | 4800 | 0.4959 | 0.4866 | | 0.1873 | 17.93 | 5200 | 0.4898 | 0.4877 | | 0.1751 | 19.31 | 5600 | 0.5488 | 0.4932 | | 0.1668 | 20.69 | 6000 | 0.5645 | 0.4986 | | 0.1638 | 22.07 | 6400 | 0.5367 | 0.4946 | | 0.1564 | 23.45 | 6800 | 0.5282 | 0.4898 | | 0.1566 | 24.83 | 7200 | 0.5489 | 0.4841 | | 0.1522 | 26.21 | 7600 | 0.5439 | 0.4821 | | 0.1378 | 27.59 | 8000 | 0.5796 | 0.4866 | | 0.1459 | 28.96 | 8400 | 0.5603 | 0.4875 | | 0.1406 | 30.34 | 8800 | 0.6773 | 0.5005 | | 0.1298 | 31.72 | 9200 | 0.5858 | 0.4827 | | 0.1268 | 33.1 | 9600 | 0.6007 | 0.4790 | | 0.1204 | 34.48 | 10000 | 0.5716 | 0.4734 | | 0.113 | 35.86 | 10400 | 0.5866 | 0.4748 | | 0.1088 | 37.24 | 10800 | 0.5790 | 0.4752 | | 0.1074 | 38.62 | 11200 | 0.5966 | 0.4721 | | 0.1018 | 40.0 | 11600 | 0.5720 | 0.4668 | | 0.0968 | 41.38 | 12000 | 0.5826 | 0.4698 | | 0.0874 | 42.76 | 12400 | 0.5937 | 0.4634 | | 0.0843 | 44.14 | 12800 | 0.6056 | 0.4640 | | 0.0822 | 45.52 | 13200 | 0.5531 | 0.4569 | | 0.0806 | 46.9 | 13600 | 0.5669 | 0.4484 | | 0.072 | 48.28 | 14000 | 0.5683 | 0.4484 | | 0.0734 | 49.65 | 14400 | 0.5735 | 0.4437 | | 0.0671 | 51.03 | 14800 | 0.5455 | 0.4394 | | 0.0617 | 52.41 | 15200 | 0.5838 | 0.4365 | | 0.0607 | 53.79 | 15600 | 0.6233 | 0.4397 | | 0.0593 | 55.17 | 16000 | 0.5649 | 0.4340 | | 0.0551 | 56.55 | 16400 | 0.5923 | 0.4392 | | 0.0503 | 57.93 | 16800 | 0.5858 | 0.4325 | | 0.0496 | 59.31 | 17200 | 0.5884 | 0.4301 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
nobuotto/distilbert-base-uncased-finetuned-imdb
089d4b96fb093c078029ddc9963e4d5de598553a
2022-05-25T00:56:20.000Z
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
nobuotto
null
nobuotto/distilbert-base-uncased-finetuned-imdb
1
null
transformers
32,428
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.4734 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7117 | 1.0 | 157 | 2.4976 | | 2.5773 | 2.0 | 314 | 2.4243 | | 2.5263 | 3.0 | 471 | 2.4348 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Tokenizers 0.12.1
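The card does not show how to query the model; a minimal fill-mask sketch, assuming the standard transformers pipeline and using an illustrative movie-review prompt:

```python
# Sketch: ask the IMDB-adapted masked-language model for likely completions.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="nobuotto/distilbert-base-uncased-finetuned-imdb")
for pred in fill_mask("This movie was absolutely [MASK]."):
    print(f"{pred['token_str']:>12}  score={pred['score']:.3f}")
```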
sagnikrayc/prajjwal-bert-small-snli
5ef8796da4f28a27fa8c42003aedb88a08f1f51a
2022-05-25T02:54:36.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
sagnikrayc
null
sagnikrayc/prajjwal-bert-small-snli
1
null
transformers
32,429
Entry not found
ChrisKalahiki/mt5-small-finetuned-amazon-en-es
e77e7dea5137c35c809e97274ed2b7b51474aa45
2022-05-25T03:11:06.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
ChrisKalahiki
null
ChrisKalahiki/mt5-small-finetuned-amazon-en-es
1
null
transformers
32,430
Entry not found
morahil/wav2vec2-hindi-new
f768d2ed4d5e0a029017fbff35b293090e671b12
2022-05-25T06:09:22.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
morahil
null
morahil/wav2vec2-hindi-new
1
null
transformers
32,431
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-hindi-new results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-hindi-new This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 40 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.2.3.dev0 - Tokenizers 0.12.1
jihae/kogpt2news
b480fd14067f4f19470a41b4aa45cc0936518ab1
2022-06-03T04:37:13.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
jihae
null
jihae/kogpt2news
1
null
transformers
32,432
Entry not found
PontifexMaximus/mt5-small-finetuned-fa-to-en
a7fb60c812d42b2fa63a2a98ce3cc8d01049f86b
2022-05-25T06:58:04.000Z
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PontifexMaximus
null
PontifexMaximus/mt5-small-finetuned-fa-to-en
1
null
transformers
32,433
Entry not found
leonweber/muppet-large-118
95fc68bc87f8b217c517b4c6f402b58b91eda85b
2022-05-25T08:47:07.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
leonweber
null
leonweber/muppet-large-118
1
null
transformers
32,434
Entry not found
ronanki/ml_use_512_MNR_15
9be6c92760fae2df6cd27345a1e879d8ac9feacb
2022-05-25T12:11:55.000Z
[ "pytorch", "distilbert", "feature-extraction", "sentence-transformers", "sentence-similarity" ]
sentence-similarity
false
ronanki
null
ronanki/ml_use_512_MNR_15
1
null
sentence-transformers
32,435
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # ronanki/ml_use_512_MNR_15 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 512 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('ronanki/ml_use_512_MNR_15') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=ronanki/ml_use_512_MNR_15) ## Training The model was trained with the parameters: **DataLoader**: `sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 8 with parameters: ``` {'batch_size': 4} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 5, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 0, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 768, 'out_features': 512, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
neuralmagic/oBERT-12-downstream-pruned-unstructured-80-squadv1
40d5b575e3b9f9edb1c841f8330947d7188d20ed
2022-06-20T11:36:49.000Z
[ "pytorch", "en", "dataset:squad", "arxiv:2203.07259", "bert", "oBERT", "sparsity", "pruning", "compression" ]
null
false
neuralmagic
null
neuralmagic/oBERT-12-downstream-pruned-unstructured-80-squadv1
1
null
null
32,436
--- tags: - bert - oBERT - sparsity - pruning - compression language: en datasets: squad --- # oBERT-12-downstream-pruned-unstructured-80-squadv1 This model is obtained with [The Optimal BERT Surgeon: Scalable and Accurate Second-Order Pruning for Large Language Models](https://arxiv.org/abs/2203.07259). It corresponds to the model presented in the `Table 1 - 30 Epochs - oBERT - SQuADv1 80%`. ``` Pruning method: oBERT downstream unstructured Paper: https://arxiv.org/abs/2203.07259 Dataset: SQuADv1 Sparsity: 80% Number of layers: 12 ``` The dev-set performance reported in the paper is averaged over three seeds, and we release the best model (marked with `(*)`): ``` | oBERT 80% | F1 | EM | | ------------ | ----- | ----- | | seed=42 | 88.95 | 82.08 | | seed=3407 (*)| 89.16 | 82.05 | | seed=54321 | 89.01 | 82.12 | | ------------ | ----- | ----- | | mean | 89.04 | 82.08 | | stdev | 0.108 | 0.035 | ``` Code: _coming soon_ ## BibTeX entry and citation info ```bibtex @article{kurtic2022optimal, title={The Optimal BERT Surgeon: Scalable and Accurate Second-Order Pruning for Large Language Models}, author={Kurtic, Eldar and Campos, Daniel and Nguyen, Tuan and Frantar, Elias and Kurtz, Mark and Fineran, Benjamin and Goin, Michael and Alistarh, Dan}, journal={arXiv preprint arXiv:2203.07259}, year={2022} } ```
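Aside from its sparsity, the checkpoint is a standard BERT QA model, so a short question-answering sketch may be useful. Loading it through the transformers pipeline is an assumption (the card only documents the pruning setup), and the context string is illustrative:

```python
# Sketch: extractive QA with the 80%-sparse oBERT SQuADv1 checkpoint.
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="neuralmagic/oBERT-12-downstream-pruned-unstructured-80-squadv1",
)
result = qa(
    question="How sparse is the model?",
    context="The oBERT surgeon prunes the fine-tuned BERT to 80% unstructured sparsity.",
)
print(result["answer"], result["score"])
```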
neuralmagic/oBERT-12-upstream-pruned-unstructured-97-finetuned-qqp
c40a3309f3517062d0bfcc9867fe836481f6bd6b
2022-06-20T11:40:11.000Z
[ "pytorch", "en", "dataset:qqp", "arxiv:2203.07259", "bert", "oBERT", "sparsity", "pruning", "compression" ]
null
false
neuralmagic
null
neuralmagic/oBERT-12-upstream-pruned-unstructured-97-finetuned-qqp
1
null
null
32,437
--- tags: - bert - oBERT - sparsity - pruning - compression language: en datasets: qqp --- # oBERT-12-upstream-pruned-unstructured-97-finetuned-qqp This model is obtained with [The Optimal BERT Surgeon: Scalable and Accurate Second-Order Pruning for Large Language Models](https://arxiv.org/abs/2203.07259). It corresponds to the model presented in the `Table 2 - oBERT - QQP 97%`. ``` Pruning method: oBERT upstream unstructured + sparse-transfer to downstream Paper: https://arxiv.org/abs/2203.07259 Dataset: QQP Sparsity: 97% Number of layers: 12 ``` The dev-set performance reported in the paper is averaged over three seeds, and we release the best model (marked with `(*)`): ``` | oBERT 97% | acc | F1 | | ------------ | ----- | ----- | | seed=42 (*)| 89.85 | 86.41 | | seed=3407 | 89.72 | 86.42 | | seed=54321 | 89.70 | 86.24 | | ------------ | ----- | ----- | | mean | 89.76 | 86.35 | | stdev | 0.081 | 0.101 | ``` Code: _coming soon_ ## BibTeX entry and citation info ```bibtex @article{kurtic2022optimal, title={The Optimal BERT Surgeon: Scalable and Accurate Second-Order Pruning for Large Language Models}, author={Kurtic, Eldar and Campos, Daniel and Nguyen, Tuan and Frantar, Elias and Kurtz, Mark and Fineran, Benjamin and Goin, Michael and Alistarh, Dan}, journal={arXiv preprint arXiv:2203.07259}, year={2022} } ```
neuralmagic/oBERT-6-downstream-dense-squadv1
3ad43f2b30b84816b7ff58ec0ce02ad632fe9022
2022-06-20T11:36:52.000Z
[ "pytorch", "en", "dataset:squad", "arxiv:2203.07259", "bert", "oBERT", "sparsity", "pruning", "compression" ]
null
false
neuralmagic
null
neuralmagic/oBERT-6-downstream-dense-squadv1
1
null
null
32,438
--- tags: - bert - oBERT - sparsity - pruning - compression language: en datasets: squad --- # oBERT-6-downstream-dense-squadv1 This model is obtained with [The Optimal BERT Surgeon: Scalable and Accurate Second-Order Pruning for Large Language Models](https://arxiv.org/abs/2203.07259). It corresponds to the model presented in the `Table 3 - 6 Layers - 0% Sparsity`, and it represents an upper bound for performance of the corresponding pruned models: - 80% unstructured: `neuralmagic/oBERT-6-downstream-pruned-unstructured-80-squadv1` - 80% block-4: `neuralmagic/oBERT-6-downstream-pruned-block4-80-squadv1` - 90% unstructured: `neuralmagic/oBERT-6-downstream-pruned-unstructured-90-squadv1` - 90% block-4: `neuralmagic/oBERT-6-downstream-pruned-block4-90-squadv1` SQuADv1 dev-set: ``` EM = 81.17 F1 = 88.32 ``` ## BibTeX entry and citation info ```bibtex @article{kurtic2022optimal, title={The Optimal BERT Surgeon: Scalable and Accurate Second-Order Pruning for Large Language Models}, author={Kurtic, Eldar and Campos, Daniel and Nguyen, Tuan and Frantar, Elias and Kurtz, Mark and Fineran, Benjamin and Goin, Michael and Alistarh, Dan}, journal={arXiv preprint arXiv:2203.07259}, year={2022} } ```
wrice/wav2vec2-base-timit-demo-google-colab
68942be183e216eaaa6168eba5684d712fe0a815
2022-05-26T00:23:52.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
wrice
null
wrice/wav2vec2-base-timit-demo-google-colab
1
null
transformers
32,439
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-base-timit-demo-google-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-timit-demo-google-colab This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.6348 - Wer: 0.3204 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 4.2767 | 0.5 | 500 | 2.9921 | 1.0 | | 1.509 | 1.01 | 1000 | 0.8223 | 0.6031 | | 0.7226 | 1.51 | 1500 | 0.6185 | 0.4935 | | 0.5777 | 2.01 | 2000 | 0.5600 | 0.4569 | | 0.4306 | 2.51 | 2500 | 0.4985 | 0.4229 | | 0.3854 | 3.02 | 3000 | 0.5113 | 0.4200 | | 0.3161 | 3.52 | 3500 | 0.5197 | 0.4042 | | 0.2904 | 4.02 | 4000 | 0.4900 | 0.3936 | | 0.2404 | 4.52 | 4500 | 0.5209 | 0.3797 | | 0.2546 | 5.03 | 5000 | 0.4836 | 0.3855 | | 0.2278 | 5.53 | 5500 | 0.5194 | 0.3676 | | 0.2049 | 6.03 | 6000 | 0.5647 | 0.4042 | | 0.199 | 6.53 | 6500 | 0.5699 | 0.3932 | | 0.1932 | 7.04 | 7000 | 0.5498 | 0.3694 | | 0.1633 | 7.54 | 7500 | 0.5918 | 0.3686 | | 0.1674 | 8.04 | 8000 | 0.5298 | 0.3716 | | 0.1496 | 8.54 | 8500 | 0.5788 | 0.3726 | | 0.1488 | 9.05 | 9000 | 0.5603 | 0.3664 | | 0.1286 | 9.55 | 9500 | 0.5427 | 0.3550 | | 0.1364 | 10.05 | 10000 | 0.5794 | 0.3621 | | 0.1177 | 10.55 | 10500 | 0.5587 | 0.3606 | | 0.1126 | 11.06 | 11000 | 0.5788 | 0.3519 | | 0.1272 | 11.56 | 11500 | 0.5859 | 0.3595 | | 0.1414 | 12.06 | 12000 | 0.5852 | 0.3586 | | 0.1081 | 12.56 | 12500 | 0.5653 | 0.3727 | | 0.1073 | 13.07 | 13000 | 0.5653 | 0.3526 | | 0.0922 | 13.57 | 13500 | 0.5758 | 0.3583 | | 0.09 | 14.07 | 14000 | 0.5990 | 0.3599 | | 0.0987 | 14.57 | 14500 | 0.5837 | 0.3516 | | 0.0823 | 15.08 | 15000 | 0.5639 | 0.3454 | | 0.0752 | 15.58 | 15500 | 0.5663 | 0.3542 | | 0.0714 | 16.08 | 16000 | 0.6273 | 0.3419 | | 0.0693 | 16.58 | 16500 | 0.6389 | 0.3441 | | 0.0634 | 17.09 | 17000 | 0.6006 | 0.3409 | | 0.063 | 17.59 | 17500 | 0.6456 | 0.3444 | | 0.0627 | 18.09 | 18000 | 0.6706 | 0.3458 | | 0.0519 | 18.59 | 18500 | 0.6370 | 0.3396 | | 0.059 | 19.1 | 19000 | 0.6602 | 0.3390 | | 0.0495 | 19.6 | 19500 | 0.6642 | 0.3364 | | 0.0601 | 20.1 | 20000 | 0.6495 | 0.3408 | | 0.07 | 20.6 | 20500 | 0.6526 | 0.3476 | | 0.0517 | 21.11 | 21000 | 0.6265 | 0.3401 | | 0.0434 | 21.61 | 21500 | 0.6364 | 0.3372 | | 0.0383 | 22.11 | 22000 | 0.6742 | 0.3377 | | 0.0372 | 22.61 | 22500 | 0.6499 | 0.3330 | | 0.0329 | 23.12 | 23000 | 0.6877 | 0.3307 | | 0.0366 | 23.62 | 23500 | 0.6351 | 0.3303 | | 0.0372 | 24.12 | 24000 | 0.6547 | 0.3286 | | 0.031 | 24.62 | 24500 | 0.6757 | 0.3304 | | 0.0367 | 25.13 | 25000 | 0.6507 | 0.3312 | | 0.0309 | 25.63 | 25500 | 0.6645 | 0.3298 | | 0.03 | 26.13 | 26000 | 0.6342 | 0.3325 | | 0.0274 | 26.63 | 26500 | 0.6614 | 
0.3255 | | 0.0236 | 27.14 | 27000 | 0.6614 | 0.3222 | | 0.0263 | 27.64 | 27500 | 0.6560 | 0.3242 | | 0.0264 | 28.14 | 28000 | 0.6337 | 0.3237 | | 0.0234 | 28.64 | 28500 | 0.6322 | 0.3208 | | 0.0249 | 29.15 | 29000 | 0.6367 | 0.3218 | | 0.0252 | 29.65 | 29500 | 0.6348 | 0.3204 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.8.2+cu111 - Datasets 1.17.0 - Tokenizers 0.11.6
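Each of these Trainer-generated cards reports Wer as its headline metric. For readers reproducing the number, a small sketch with the `evaluate` library (an assumption, since the cards do not say which WER implementation the Trainer used), shown on toy transcripts:

```python
# Sketch: word error rate as reported in the cards' Wer column (lower is better).
import evaluate

wer_metric = evaluate.load("wer")
references  = ["the cat sat on the mat"]
predictions = ["the cat sit on the mat"]
print(wer_metric.compute(predictions=predictions, references=references))  # 1/6, about 0.167
```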
cristinakuo/wav2vec2-sala1
eaf4eb561589d71c68554c2b6782a5a01155104e
2022-05-29T05:18:06.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
cristinakuo
null
cristinakuo/wav2vec2-sala1
1
null
transformers
32,440
Entry not found
duclee9x/wav2vec2-voa-example
4a3119d181cd5790f4bfc0f25d20354dea22fc56
2022-05-26T08:32:06.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
duclee9x
null
duclee9x/wav2vec2-voa-example
1
null
transformers
32,441
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-voa-example results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-voa-example This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: nan - Wer: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:---:| | 4.296 | 4.35 | 500 | 3.7226 | 1.0 | | 3.027 | 8.7 | 1000 | 3.7233 | 1.0 | | 3.0376 | 13.04 | 1500 | 3.7246 | 1.0 | | 3.0221 | 17.39 | 2000 | nan | 1.0 | | 0.0 | 21.74 | 2500 | nan | 1.0 | | 0.0 | 26.09 | 3000 | nan | 1.0 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
shufei/wav2vec2-common_voice-tr-demo
323ae62254d3ca7c66d091278575bb4e227063a1
2022-05-26T02:14:22.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
shufei
null
shufei/wav2vec2-common_voice-tr-demo
1
null
transformers
32,442
Entry not found
wrice/wavlm-large-timit-punctuation
5e26a301c6c16bd5f21d66d26ec970b0558bca64
2022-05-31T13:31:43.000Z
[ "pytorch", "tensorboard", "wavlm", "automatic-speech-recognition", "transformers", "generated_from_trainer", "model-index" ]
automatic-speech-recognition
false
wrice
null
wrice/wavlm-large-timit-punctuation
1
null
transformers
32,443
--- tags: - generated_from_trainer model-index: - name: wavlm-large-timit-punctuation results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wavlm-large-timit-punctuation This model is a fine-tuned version of [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3368 - Wer: 0.2601 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 5.2379 | 1.0 | 500 | 3.1228 | 1.0 | | 2.5847 | 2.01 | 1000 | 1.1550 | 0.9147 | | 1.0034 | 3.01 | 1500 | 0.5856 | 0.5180 | | 0.5868 | 4.02 | 2000 | 0.4238 | 0.4229 | | 0.3892 | 5.02 | 2500 | 0.3356 | 0.3665 | | 0.2926 | 6.02 | 3000 | 0.3196 | 0.3360 | | 0.2294 | 7.03 | 3500 | 0.3046 | 0.3170 | | 0.1976 | 8.03 | 4000 | 0.3032 | 0.3111 | | 0.1644 | 9.04 | 4500 | 0.2946 | 0.2954 | | 0.1574 | 10.04 | 5000 | 0.3211 | 0.2998 | | 0.1391 | 11.04 | 5500 | 0.2986 | 0.2922 | | 0.1124 | 12.05 | 6000 | 0.2948 | 0.2837 | | 0.1003 | 13.05 | 6500 | 0.2928 | 0.2788 | | 0.1031 | 14.06 | 7000 | 0.3230 | 0.2805 | | 0.0901 | 15.06 | 7500 | 0.3081 | 0.2749 | | 0.0842 | 16.06 | 8000 | 0.3075 | 0.2726 | | 0.0809 | 17.07 | 8500 | 0.3215 | 0.2717 | | 0.0747 | 18.07 | 9000 | 0.3272 | 0.2721 | | 0.0735 | 19.08 | 9500 | 0.3242 | 0.2684 | | 0.0631 | 20.08 | 10000 | 0.3216 | 0.2640 | | 0.0632 | 21.08 | 10500 | 0.3149 | 0.2646 | | 0.0625 | 22.09 | 11000 | 0.3196 | 0.2630 | | 0.0611 | 23.09 | 11500 | 0.3244 | 0.2638 | | 0.0532 | 24.1 | 12000 | 0.3271 | 0.2641 | | 0.0503 | 25.1 | 12500 | 0.3368 | 0.2636 | | 0.0534 | 26.1 | 13000 | 0.3393 | 0.2627 | | 0.049 | 27.11 | 13500 | 0.3389 | 0.2626 | | 0.0441 | 28.11 | 14000 | 0.3375 | 0.2605 | | 0.0522 | 29.12 | 14500 | 0.3368 | 0.2601 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.8.2+cu111 - Datasets 1.17.0 - Tokenizers 0.11.6
LDD/wwm-2
8044009f1efc735aaf172bd5780a2a24629650cc
2022-05-26T03:36:46.000Z
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
false
LDD
null
LDD/wwm-2
1
null
transformers
32,444
Entry not found
RuiqianLi/one-simple-finetune-test
1a5109c945f1e352ba92d7c793e3ca63c0a06478
2022-05-26T07:41:32.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:li_singlish", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
RuiqianLi
null
RuiqianLi/one-simple-finetune-test
1
null
transformers
32,445
--- license: apache-2.0 tags: - generated_from_trainer datasets: - li_singlish model-index: - name: one-simple-finetune-test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # one-simple-finetune-test This model is a fine-tuned version of [RuiqianLi/wav2vec2-large-xls-r-300m-singlish-colab](https://huggingface.co/RuiqianLi/wav2vec2-large-xls-r-300m-singlish-colab) on the li_singlish dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
Giseok/wav2vec2-base-STTTest
3a8337c467adee456eed5e2458653697bcb73618
2022-05-27T09:12:19.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Giseok
null
Giseok/wav2vec2-base-STTTest
1
null
transformers
32,446
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-base-STTTest results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-STTTest This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5198 - Wer: 0.3393 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 0.231 | 1.0 | 500 | 0.4337 | 0.4100 | | 0.1845 | 2.01 | 1000 | 0.4296 | 0.3931 | | 0.1551 | 3.01 | 1500 | 0.4397 | 0.3770 | | 0.1479 | 4.02 | 2000 | 0.4524 | 0.3827 | | 0.1186 | 5.02 | 2500 | 0.5182 | 0.3795 | | 0.1079 | 6.02 | 3000 | 0.4799 | 0.3737 | | 0.0974 | 7.03 | 3500 | 0.4966 | 0.3860 | | 0.0878 | 8.03 | 4000 | 0.4993 | 0.3699 | | 0.0788 | 9.04 | 4500 | 0.5183 | 0.3678 | | 0.0732 | 10.04 | 5000 | 0.5064 | 0.3635 | | 0.0664 | 11.04 | 5500 | 0.5330 | 0.3663 | | 0.0596 | 12.05 | 6000 | 0.5147 | 0.3516 | | 0.0538 | 13.05 | 6500 | 0.5254 | 0.3581 | | 0.0535 | 14.06 | 7000 | 0.4902 | 0.3534 | | 0.0492 | 15.06 | 7500 | 0.5115 | 0.3488 | | 0.0455 | 16.06 | 8000 | 0.5250 | 0.3472 | | 0.0434 | 17.07 | 8500 | 0.5338 | 0.3515 | | 0.0351 | 18.07 | 9000 | 0.5365 | 0.3444 | | 0.0341 | 19.08 | 9500 | 0.4886 | 0.3439 | | 0.0332 | 20.08 | 10000 | 0.5234 | 0.3475 | | 0.0289 | 21.08 | 10500 | 0.5375 | 0.3464 | | 0.028 | 22.09 | 11000 | 0.5395 | 0.3478 | | 0.0225 | 23.09 | 11500 | 0.5236 | 0.3428 | | 0.0244 | 24.1 | 12000 | 0.5122 | 0.3402 | | 0.0246 | 25.1 | 12500 | 0.5212 | 0.3390 | | 0.0214 | 26.1 | 13000 | 0.5198 | 0.3393 | | 0.0179 | 27.11 | 13500 | 0.5198 | 0.3393 | | 0.0194 | 28.11 | 14000 | 0.5198 | 0.3393 | | 0.0193 | 29.12 | 14500 | 0.5198 | 0.3393 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.10.1+cu111 - Datasets 1.18.3 - Tokenizers 0.12.1
aioxlabs/dvoice-swahili
05eb3471d4d24f9eee3b5482362533530145ca3e
2022-05-28T08:20:36.000Z
[ "wav2vec2", "feature-extraction", "sw", "dataset:commonvoice", "speechbrain", "CTC", "pytorch", "Transformer", "license:apache-2.0", "automatic-speech-recognition" ]
automatic-speech-recognition
false
aioxlabs
null
aioxlabs/dvoice-swahili
1
null
speechbrain
32,447
--- language: "sw" thumbnail: pipeline_tag: automatic-speech-recognition tags: - CTC - pytorch - speechbrain - Transformer license: "apache-2.0" datasets: - commonvoice metrics: - wer - cer --- <iframe src="https://ghbtns.com/github-btn.html?user=speechbrain&repo=speechbrain&type=star&count=true&size=large&v=2" frameborder="0" scrolling="0" width="170" height="30" title="GitHub"></iframe> <br/><br/> # wav2vec 2.0 with CTC/Attention trained on DVoice Swahili (No LM) This repository provides all the necessary tools to perform automatic speech recognition from an end-to-end system pretrained on a [DVoice-VoxLingua107](https://zenodo.org/record/6342622) Swahili dataset within SpeechBrain. For a better experience, we encourage you to learn more about [SpeechBrain](https://speechbrain.github.io). | DVoice Release | Val. CER | Val. WER | Test CER | Test WER | |:-------------:|:---------------------------:| -----:| -----:| -----:| | v2.0 | 8.83 | 22.78 | 9.46 | 23.16 | # Pipeline description This ASR system is composed of 2 different but linked blocks: - Tokenizer (unigram) that transforms words into subword units and trained with the train transcriptions. - Acoustic model (wav2vec2.0 + CTC). A pretrained wav2vec 2.0 model ([facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53)) is combined with two DNN layers and finetuned on the Darija dataset. The obtained final acoustic representation is given to the CTC greedy decoder. The system is trained with recordings sampled at 16kHz (single channel). The code will automatically normalize your audio (i.e., resampling + mono channel selection) when calling *transcribe_file* if needed. # Install SpeechBrain First of all, please install tranformers and SpeechBrain with the following command: ``` pip install speechbrain transformers ``` Please notice that we encourage you to read the SpeechBrain tutorials and learn more about [SpeechBrain](https://speechbrain.github.io). # Transcribing your own audio files (in Swahili) ```python from speechbrain.pretrained import EncoderASR asr_model = EncoderASR.from_hparams(source="aioxlabs/dvoice-swahili", savedir="pretrained_models/asr-wav2vec2-dvoice-sw") asr_model.transcribe_file('./the_path_to_your_audio_file') ``` # Inference on GPU To perform inference on the GPU, add `run_opts={"device":"cuda"}` when calling the `from_hparams` method. # Training To train the model from scratch, please see our GitHub tutorial [here](https://github.com/AIOXLABS/DVoice). # Limitations The SpeechBrain team does not provide any warranty on the performance achieved by this model when used on other datasets. # About DVoice DVoice is a community initiative that aims to provide Africa low resources languages with data and models to facilitate their use of voice technologies. The lack of data on these languages makes it necessary to collect data using methods that are specific to each one. Two different approaches are currently used: the DVoice platforms ([https://dvoice.ma](https://dvoice.ma) and [https://dvoice.sn](https://dvoice.sn)), which are based on Mozilla Common Voice, for collecting authentic recordings from the community, and transfer learning techniques for automatically labeling recordings that are retrived from social medias. The DVoice platform currently manages 7 languages including Darija (Moroccan Arabic dialect) whose dataset appears on this version, Wolof, Mandingo, Serere, Pular, Diola and Soninke. 
For this project, AIOX Labs and the SI2M Laboratory are joining forces to build the future of these technologies together. # About AIOX Labs Based in Rabat, London and Paris, AIOX-Labs mobilizes artificial intelligence technologies to meet the business needs and data projects of companies. - It serves the growth of groups, the optimization of processes and the improvement of the customer experience. - AIOX-Labs is multi-sector, from fintech to industry, including retail and consumer goods. - It delivers business-ready data products with a solid algorithmic base and the adaptability to meet the specific needs of each client. - Its complementary team is made up of PhDs in AI and business experts with a solid scientific base and international publications. Website: [https://www.aiox-labs.com/](https://www.aiox-labs.com/) # SI2M Laboratory The Information Systems, Intelligent Systems and Mathematical Modeling Research Laboratory (SI2M) is an academic research laboratory of the National Institute of Statistics and Applied Economics (INSEA). The research areas of the laboratory are Information Systems, Intelligent Systems, Artificial Intelligence, Decision Support, Network and System Security, and Mathematical Modelling. Website: [SI2M Laboratory](https://insea.ac.ma/index.php/pole-recherche/equipe-de-recherche/150-laboratoire-de-recherche-en-systemes-d-information-systemes-intelligents-et-modelisation-mathematique) # About SpeechBrain SpeechBrain is an open-source, all-in-one speech toolkit. It is designed to be simple, extremely flexible, and user-friendly, and it obtains competitive or state-of-the-art performance in various domains. Website: https://speechbrain.github.io/ GitHub: https://github.com/speechbrain/speechbrain # Referencing SpeechBrain ``` @misc{SB2021, author = {Ravanelli, Mirco and Parcollet, Titouan and Rouhe, Aku and Plantinga, Peter and Rastorgueva, Elena and Lugosch, Loren and Dawalatabad, Nauman and Ju-Chieh, Chou and Heba, Abdel and Grondin, Francois and Aris, William and Liao, Chien-Feng and Cornell, Samuele and Yeh, Sung-Lin and Na, Hwidong and Gao, Yan and Fu, Szu-Wei and Subakan, Cem and De Mori, Renato and Bengio, Yoshua }, title = {SpeechBrain}, year = {2021}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\\\\url{https://github.com/speechbrain/speechbrain}}, } ``` # Acknowledgements This research was supported through computational resources of HPC-MARWAN (www.marwan.ma/hpc) provided by CNRST, Rabat, Morocco. We deeply thank this institution.
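As a follow-up to the card's "Inference on GPU" note, the same loading call with the suggested `run_opts` argument, otherwise identical to the CPU example shown in the card:

```python
# Sketch: GPU variant of the loading call shown in the card above.
from speechbrain.pretrained import EncoderASR

asr_model = EncoderASR.from_hparams(
    source="aioxlabs/dvoice-swahili",
    savedir="pretrained_models/asr-wav2vec2-dvoice-sw",
    run_opts={"device": "cuda"},  # per the card's "Inference on GPU" section
)
asr_model.transcribe_file("./the_path_to_your_audio_file")
```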
Kashni/damontvd
1596c55f02d93be66caff56d552d8edb094f3e1a
2022-05-26T11:43:34.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
Kashni
null
Kashni/damontvd
1
null
transformers
32,448
--- tags: - conversation --- # Damon from TVD
forcorpus/bert-base-uncased-finetune-security
78f09fc513e9bc8b6104077438e0ba7e41922c5f
2022-05-26T11:41:39.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
forcorpus
null
forcorpus/bert-base-uncased-finetune-security
1
null
transformers
32,449
Entry not found
theojolliffe/bart-large-cnn-pubmed1o3
c39846503f67948f0ee42798ba7d017ab0e0d485
2022-05-27T13:19:47.000Z
[ "pytorch", "tensorboard", "bart", "text2text-generation", "dataset:scientific_papers", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
theojolliffe
null
theojolliffe/bart-large-cnn-pubmed1o3
1
null
transformers
32,450
--- license: mit tags: - generated_from_trainer datasets: - scientific_papers metrics: - rouge model-index: - name: bart-large-cnn-pubmed1o3 results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: scientific_papers type: scientific_papers args: pubmed metrics: - name: Rouge1 type: rouge value: 36.7566 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-cnn-pubmed1o3 This model is a fine-tuned version of [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on the scientific_papers dataset. It achieves the following results on the evaluation set: - Loss: 1.9359 - Rouge1: 36.7566 - Rouge2: 14.813 - Rougel: 22.4693 - Rougelsum: 33.4325 - Gen Len: 138.7332 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:|:--------:| | 2.028 | 1.0 | 19988 | 1.9359 | 36.7566 | 14.813 | 22.4693 | 33.4325 | 138.7332 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
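A short usage sketch for the summarizer, assuming the standard transformers pipeline; the input text is illustrative and the generation lengths are ordinary defaults rather than values taken from the card:

```python
# Sketch: summarize a scientific-style paragraph with the fine-tuned BART model.
from transformers import pipeline

summarizer = pipeline("summarization", model="theojolliffe/bart-large-cnn-pubmed1o3")
article = (
    "Background: We evaluated a new screening protocol in a multi-centre cohort. "
    "Methods: 1,200 patients were enrolled and followed for 24 months. "
    "Results: The protocol reduced late diagnoses without increasing false positives."
)
print(summarizer(article, max_length=60, min_length=20, do_sample=False)[0]["summary_text"])
```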
Aiyshwariya/bert-finetuned-squad
2a7c6f30759cfb0a02b09b9b09190a3555b16d19
2022-05-26T20:12:18.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
Aiyshwariya
null
Aiyshwariya/bert-finetuned-squad
1
null
transformers
32,451
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
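Not part of the original card — a minimal extractive-QA sketch, assuming the standard `transformers` question-answering pipeline; the question and context are placeholders.
```python
from transformers import pipeline

# Extractive question answering with the fine-tuned checkpoint
qa = pipeline("question-answering", model="Aiyshwariya/bert-finetuned-squad")

context = "SQuAD is a reading-comprehension dataset built from Wikipedia articles."
print(qa(question="What is SQuAD built from?", context=context))
```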
prodm93/GPT2Dynamic_text_model_v1
ccf843e0d8573d73d848f3c1c0d1972b55369117
2022-05-26T19:00:53.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
prodm93
null
prodm93/GPT2Dynamic_text_model_v1
1
null
transformers
32,452
Entry not found
SherryLiu/inst0075model
63160908e9bbcb2263d5dc68bedbc7dcbcf8dad5
2022-05-26T22:45:30.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "license:afl-3.0", "autotrain_compatible" ]
fill-mask
false
SherryLiu
null
SherryLiu/inst0075model
1
null
transformers
32,453
--- license: afl-3.0 ---
coreybrady/coreyresults
9a7b3d6affae0c5472558f6648c496c8a2e36984
2022-05-27T19:38:53.000Z
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
coreybrady
null
coreybrady/coreyresults
1
null
transformers
32,454
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: coreyresults results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # coreyresults This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
olpa/xlm-roberta-base-finetuned-panx-de
287eee13d894eaa24730f6b9b7223e2e0f881752
2022-05-30T03:26:44.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
olpa
null
olpa/xlm-roberta-base-finetuned-panx-de
1
null
transformers
32,455
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8627004891366169 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1363 - F1: 0.8627 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2539 | 1.0 | 525 | 0.1697 | 0.8179 | | 0.1317 | 2.0 | 1050 | 0.1327 | 0.8516 | | 0.0819 | 3.0 | 1575 | 0.1363 | 0.8627 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
geomos/distilbert-base-uncased-finetuned-imdb
74dbfd0d2074cd22bbd2a93a513f853732873230
2022-05-27T04:40:19.000Z
[ "pytorch", "distilbert", "fill-mask", "dataset:imdb", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
geomos
null
geomos/distilbert-base-uncased-finetuned-imdb
1
null
transformers
32,456
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: distilbert-base-uncased-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 2.2424 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.4921 | 1.0 | 479 | 2.3047 | | 2.3893 | 2.0 | 958 | 2.2607 | | 2.3571 | 3.0 | 1437 | 2.2481 | ### Framework versions - Transformers 4.13.0 - Pytorch 1.10.1 - Datasets 2.2.2 - Tokenizers 0.10.3
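Not part of the original card — a minimal masked-language-modelling sketch for this domain-adapted checkpoint; the example sentence is a placeholder.
```python
from transformers import pipeline

# Masked-token prediction with the IMDB-adapted DistilBERT
fill = pipeline("fill-mask", model="geomos/distilbert-base-uncased-finetuned-imdb")

for pred in fill("This movie was an absolute [MASK]."):
    print(pred["token_str"], round(pred["score"], 3))
```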
PSW/samsum_reverse_train
ab36aedc32756d7e4a64128d1dfff6c67bdd323b
2022-05-31T07:08:28.000Z
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
PSW
null
PSW/samsum_reverse_train
1
null
transformers
32,457
Entry not found
KoichiYasuoka/deberta-large-japanese-upos
1e70ccea11d3c1dd6121ddca5f82ef8b6cf9a530
2022-05-27T06:54:18.000Z
[ "pytorch", "deberta-v2", "token-classification", "ja", "dataset:universal_dependencies", "transformers", "japanese", "pos", "dependency-parsing", "license:cc-by-sa-4.0", "autotrain_compatible" ]
token-classification
false
KoichiYasuoka
null
KoichiYasuoka/deberta-large-japanese-upos
1
null
transformers
32,458
--- language: - "ja" tags: - "japanese" - "token-classification" - "pos" - "dependency-parsing" datasets: - "universal_dependencies" license: "cc-by-sa-4.0" pipeline_tag: "token-classification" widget: - text: "国境の長いトンネルを抜けると雪国であった。" --- # deberta-large-japanese-upos ## Model Description This is a DeBERTa(V2) model pre-trained on 青空文庫 texts for POS-tagging and dependency-parsing, derived from [deberta-large-japanese-aozora](https://huggingface.co/KoichiYasuoka/deberta-large-japanese-aozora). Every short-unit-word is tagged by [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech). ## How to Use ```py import torch from transformers import AutoTokenizer,AutoModelForTokenClassification tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/deberta-large-japanese-upos") model=AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/deberta-large-japanese-upos") s="国境の長いトンネルを抜けると雪国であった。" t=tokenizer.tokenize(s) p=[model.config.id2label[q] for q in torch.argmax(model(tokenizer.encode(s,return_tensors="pt"))["logits"],dim=2)[0].tolist()[1:-1]] print(list(zip(t,p))) ``` or ```py import esupar nlp=esupar.load("KoichiYasuoka/deberta-large-japanese-upos") print(nlp("国境の長いトンネルを抜けると雪国であった。")) ``` ## See Also [esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer POS-tagger and Dependency-parser with BERT/RoBERTa/DeBERTa models
chrisvinsen/xlsr-wav2vec2-final-1-lm-1
251b3d8389a265061bbf0390ed35b2c383f7b7b3
2022-05-29T01:09:25.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
chrisvinsen
null
chrisvinsen/xlsr-wav2vec2-final-1-lm-1
1
null
transformers
32,459
CommonVoice Dataset 8.0 --> Train + Test + Validation. WER: 0.216. WER with LM: 0.123
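Not part of the original card — a minimal transcription sketch, assuming the `transformers` ASR pipeline; the audio path is a placeholder, and the reported "WER with LM" relies on external language-model decoding that this sketch does not reproduce.
```python
from transformers import pipeline

# Greedy CTC decoding, without the external language model
asr = pipeline("automatic-speech-recognition", model="chrisvinsen/xlsr-wav2vec2-final-1-lm-1")

# Placeholder path to a 16 kHz mono recording (audio decoding requires ffmpeg)
print(asr("sample.wav")["text"])
```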
dkasti/xlm-roberta-base-finetuned-panx-de
0d67a2c373cea2b544b847d87dfd6708009ee95e
2022-06-02T00:32:38.000Z
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:xtreme", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
false
dkasti
null
dkasti/xlm-roberta-base-finetuned-panx-de
1
null
transformers
32,460
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8615769427548178 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1401 - F1: 0.8616 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2605 | 1.0 | 525 | 0.1708 | 0.8198 | | 0.1274 | 2.0 | 1050 | 0.1415 | 0.8449 | | 0.0819 | 3.0 | 1575 | 0.1401 | 0.8616 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0 - Datasets 1.16.1 - Tokenizers 0.10.3
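Not part of the original card — a minimal German NER sketch, assuming the checkpoint exposes PAN-X entity labels; the sentence is a placeholder.
```python
from transformers import pipeline

# Token classification with word-piece aggregation
ner = pipeline(
    "token-classification",
    model="dkasti/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",
)

print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```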
deathmite/DiabloGPT-small-potaru
3a5e33031abc4892b03303c7968ba79fae46f6d8
2022-05-27T09:01:44.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
deathmite
null
deathmite/DiabloGPT-small-potaru
1
null
transformers
32,461
--- tags: - conversational --- # Potaru DiabloGPT model
huggingtweets/eyeofjackiechan
d31b740b2938a8a9a266d1bffa0e83f37a2880c6
2022-06-20T01:04:58.000Z
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "huggingtweets" ]
text-generation
false
huggingtweets
null
huggingtweets/eyeofjackiechan
1
null
transformers
32,462
--- language: en thumbnail: http://www.huggingtweets.com/eyeofjackiechan/1655687093014/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/644052743/logo_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Jackie Chan</div> <div style="text-align: center; font-size: 14px;">@eyeofjackiechan</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Jackie Chan. | Data | Jackie Chan | | --- | --- | | Tweets downloaded | 2411 | | Retweets | 24 | | Short tweets | 109 | | Tweets kept | 2278 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/xs2o3djj/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @eyeofjackiechan's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1jlgydkw) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1jlgydkw/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/eyeofjackiechan') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
theojolliffe/bart-large-cnn-pubmed1o3-pubmed2o3
9db53aea5f4e9bc0481107b13626da3afabde9dd
2022-05-27T18:59:12.000Z
[ "pytorch", "tensorboard", "bart", "text2text-generation", "dataset:scientific_papers", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
theojolliffe
null
theojolliffe/bart-large-cnn-pubmed1o3-pubmed2o3
1
null
transformers
32,463
--- license: mit tags: - generated_from_trainer datasets: - scientific_papers metrics: - rouge model-index: - name: bart-large-cnn-pubmed1o3-pubmed2o3 results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: scientific_papers type: scientific_papers args: pubmed metrics: - name: Rouge1 type: rouge value: 37.4586 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-cnn-pubmed1o3-pubmed2o3 This model is a fine-tuned version of [theojolliffe/bart-large-cnn-pubmed1o3](https://huggingface.co/theojolliffe/bart-large-cnn-pubmed1o3) on the scientific_papers dataset. It achieves the following results on the evaluation set: - Loss: 1.8817 - Rouge1: 37.4586 - Rouge2: 15.5572 - Rougel: 23.0686 - Rougelsum: 34.1522 - Gen Len: 138.379 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.9586 | 1.0 | 19988 | 1.8817 | 37.4586 | 15.5572 | 23.0686 | 34.1522 | 138.379 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
Splend1dchan/wav2vec2-large-lv60_byt5-small_nofreeze_bs64
172b8d6d97548c2cb361976ce669bd3ffcff60d2
2022-05-31T15:00:45.000Z
[ "pytorch", "speechmix", "transformers" ]
null
false
Splend1dchan
null
Splend1dchan/wav2vec2-large-lv60_byt5-small_nofreeze_bs64
1
null
transformers
32,464
Entry not found
nataliebhuerta/wav2vec2-base-finetuned-ks
305eb89a86c15156ddd9800e7c59b0ac7a17ae10
2022-05-27T14:46:35.000Z
[ "pytorch", "tensorboard", "wav2vec2", "audio-classification", "dataset:superb", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
audio-classification
false
nataliebhuerta
null
nataliebhuerta/wav2vec2-base-finetuned-ks
1
null
transformers
32,465
--- license: apache-2.0 tags: - generated_from_trainer datasets: - superb model-index: - name: wav2vec2-base-finetuned-ks results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-finetuned-ks This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the superb dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 1.14.0 - Tokenizers 0.10.3
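Not part of the original card — a minimal keyword-spotting sketch, assuming the checkpoint was exported with its audio-classification head; the clip path is a placeholder.
```python
from transformers import pipeline

# Keyword spotting on short 16 kHz clips, as in superb/ks
clf = pipeline("audio-classification", model="nataliebhuerta/wav2vec2-base-finetuned-ks")

# Placeholder path to a roughly one-second spoken-command recording
print(clf("yes_001.wav"))
```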
shafin/distilbert-base-uncased-finetuned-cust
849e6b0f6ea96fdde2b58a4b002628fb95b30ef4
2022-05-27T16:42:07.000Z
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
fill-mask
false
shafin
null
shafin/distilbert-base-uncased-finetuned-cust
1
null
transformers
32,466
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-cust results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cust This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0735 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.4249 | 1.0 | 625 | 2.2071 | | 2.2697 | 2.0 | 1250 | 2.1411 | | 2.2092 | 3.0 | 1875 | 2.1255 | | 2.1674 | 4.0 | 2500 | 2.0682 | | 2.1499 | 5.0 | 3125 | 2.0667 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
Satyamatury/wav2vec2-large-xls-r-300m-hindi-colab
8a1de2618cda9b25c6eab02968a5715e5842f436
2022-06-13T11:08:04.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Satyamatury
null
Satyamatury/wav2vec2-large-xls-r-300m-hindi-colab
1
null
transformers
32,467
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-hindi-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-hindi-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 1.7529 - Wer: 0.9130 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 60 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 6.2923 | 44.42 | 400 | 1.7529 | 0.9130 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.12.1
Santiagot1105/wav2vec2-l-xlsr-es-col-pro-noise
fccf80847d0f48ad595cd5cbbb000708a54a057f
2022-05-30T06:08:39.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
Santiagot1105
null
Santiagot1105/wav2vec2-l-xlsr-es-col-pro-noise
1
null
transformers
32,468
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-l-xlsr-es-col-pro-noise results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-l-xlsr-es-col-pro-noise This model is a fine-tuned version of [jonatasgrosman/wav2vec2-large-xlsr-53-spanish](https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-spanish) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0677 - Wer: 0.0380 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.94 | 1.21 | 400 | 0.0800 | 0.0814 | | 0.4711 | 2.42 | 800 | 0.0730 | 0.0692 | | 0.3451 | 3.62 | 1200 | 0.0729 | 0.0669 | | 0.2958 | 4.83 | 1600 | 0.0796 | 0.0667 | | 0.2544 | 6.04 | 2000 | 0.0808 | 0.0584 | | 0.227 | 7.25 | 2400 | 0.0791 | 0.0643 | | 0.2061 | 8.46 | 2800 | 0.0718 | 0.0582 | | 0.1901 | 9.67 | 3200 | 0.0709 | 0.0587 | | 0.179 | 10.87 | 3600 | 0.0698 | 0.0558 | | 0.1693 | 12.08 | 4000 | 0.0709 | 0.0530 | | 0.1621 | 13.29 | 4400 | 0.0640 | 0.0487 | | 0.1443 | 14.5 | 4800 | 0.0793 | 0.0587 | | 0.1408 | 15.71 | 5200 | 0.0741 | 0.0528 | | 0.1377 | 16.92 | 5600 | 0.0702 | 0.0462 | | 0.1292 | 18.13 | 6000 | 0.0822 | 0.0539 | | 0.1197 | 19.33 | 6400 | 0.0625 | 0.0436 | | 0.1137 | 20.54 | 6800 | 0.0650 | 0.0419 | | 0.1017 | 21.75 | 7200 | 0.0630 | 0.0392 | | 0.0976 | 22.96 | 7600 | 0.0630 | 0.0387 | | 0.0942 | 24.17 | 8000 | 0.0631 | 0.0380 | | 0.0924 | 25.38 | 8400 | 0.0645 | 0.0374 | | 0.0862 | 26.59 | 8800 | 0.0677 | 0.0402 | | 0.0831 | 27.79 | 9200 | 0.0680 | 0.0393 | | 0.077 | 29.0 | 9600 | 0.0677 | 0.0380 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.1+cu102 - Datasets 1.13.3 - Tokenizers 0.10.3
sanbohork/Caso3_T5
359fe741dc8872759179c2400f2153b146fede0b
2022-05-28T13:35:02.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "license:other", "autotrain_compatible" ]
text2text-generation
false
sanbohork
null
sanbohork/Caso3_T5
1
null
transformers
32,469
--- license: other --- This model aims to generate the title of a text. It is based on the article: https://medium.com/nlplanet/a-full-guide-to-finetuning-t5-for-text2text-and-building-a-demo-with-streamlit-c72009631887 The model was trained with 500 items from the dataset. It generates the title of a text.
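Not part of the original card — a minimal title-generation sketch, treating the model as a plain text2text checkpoint; the input text and generation settings are assumptions.
```python
from transformers import pipeline

# Title generation as a text2text task
titler = pipeline("text2text-generation", model="sanbohork/Caso3_T5")

text = "Placeholder article body whose title we want to generate."
print(titler(text, max_length=32)[0]["generated_text"])
```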
ElMuchoDingDong/DialoGPT-medium-AudreyHepburn_v4
7113d4a433c6fc57c8f7c37d4fd1aa6f85d56736
2022-05-27T20:27:21.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
ElMuchoDingDong
null
ElMuchoDingDong/DialoGPT-medium-AudreyHepburn_v4
1
null
transformers
32,470
--- tags: - conversational --- # Audrey Hepburn DialoGPT Model
eugenecamus/resnet-50-base-beans-demo
0fbcde2fc6b720e18e4a94f5d6ccf7bccea41b0f
2022-05-31T17:47:56.000Z
[ "pytorch", "tensorboard", "resnet", "image-classification", "dataset:beans", "transformers", "vision", "generated_from_trainer", "model-index" ]
image-classification
false
eugenecamus
null
eugenecamus/resnet-50-base-beans-demo
1
null
transformers
32,471
--- tags: - image-classification - vision - generated_from_trainer datasets: - beans metrics: - accuracy model-index: - name: resnet-50-base-beans-demo results: - task: name: Image Classification type: image-classification dataset: name: beans type: beans args: default metrics: - name: Accuracy type: accuracy value: 0.9022556390977443 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet-50-base-beans-demo This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.2188 - Accuracy: 0.9023 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5679 | 1.0 | 130 | 0.2188 | 0.9023 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu102 - Datasets 2.2.1 - Tokenizers 0.12.1
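Not part of the original card — a minimal image-classification sketch on the beans label set; the image path is a placeholder.
```python
from PIL import Image
from transformers import pipeline

# Leaf-disease classification with the fine-tuned ResNet-50
clf = pipeline("image-classification", model="eugenecamus/resnet-50-base-beans-demo")

# Placeholder path; any RGB leaf photo works
print(clf(Image.open("leaf.jpg")))
```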
jplu/adel-dbpedia-retrieval
89b7e1e6d35306d21678b7cb794885b8fa0ff21a
2022-05-27T21:58:29.000Z
[ "pytorch", "distilbert", "feature-extraction", "sentence-transformers", "sentence-similarity", "transformers" ]
sentence-similarity
false
jplu
null
jplu/adel-dbpedia-retrieval
1
null
sentence-transformers
32,472
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 71 with parameters: ``` {'batch_size': 8, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `beir.losses.margin_mse_loss.MarginMSELoss` Parameters of the fit()-Method: ``` { "epochs": 11, "evaluation_steps": 10000, "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "correct_bias": false, "eps": 1e-06, "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Julietheg/checkpoint-1000
a55afa8f11ffc26d533886b8d6fb8cdd2bff8600
2022-05-28T00:57:02.000Z
[ "pytorch", "tf", "jax", "t5", "text2text-generation", "transformers", "generated_from_keras_callback", "model-index", "autotrain_compatible" ]
text2text-generation
false
Julietheg
null
Julietheg/checkpoint-1000
1
null
transformers
32,473
--- tags: - generated_from_keras_callback model-index: - name: checkpoint-1000 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # checkpoint-1000 This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: None - training_precision: float32 ### Training results ### Framework versions - Transformers 4.19.2 - TensorFlow 2.8.0 - Datasets 2.2.2 - Tokenizers 0.12.1
edharepe/T5_generacion_titulos
b4578843e0d1dbd99f5690f6b8251e6daf1f5a00
2022-05-28T05:32:42.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
edharepe
null
edharepe/T5_generacion_titulos
1
null
transformers
32,474
This model was created from T5 Fine tuning with PyTorch.ipynb by Shivanand Roy and trained on a dataset of news articles from a Uruguayan newspaper; the repository contains all of the files produced during the training process.
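Not part of the original card — a minimal headline-generation sketch; the `summarize:` prefix follows the referenced notebook and is an assumption, as is the placeholder article body.
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("edharepe/T5_generacion_titulos")
model = AutoModelForSeq2SeqLM.from_pretrained("edharepe/T5_generacion_titulos")

# Placeholder news body; the task prefix mirrors the training notebook
inputs = tokenizer("summarize: Cuerpo de la noticia...", return_tensors="pt", truncation=True)
ids = model.generate(**inputs, max_length=32, num_beams=4)
print(tokenizer.decode(ids[0], skip_special_tokens=True))
```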
aiface/test285
d84e1b8d1caeddcc2e6747b87ff27e96096ce3a5
2022-05-28T08:57:56.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:vivos_dataset", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
aiface
null
aiface/test285
1
null
transformers
32,475
--- license: apache-2.0 tags: - generated_from_trainer datasets: - vivos_dataset model-index: - name: test285 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test285 This model is a fine-tuned version of [aiface/cv8](https://huggingface.co/aiface/cv8) on the vivos_dataset dataset. It achieves the following results on the evaluation set: - eval_loss: 0.3865 - eval_wer: 0.3012 - eval_runtime: 39.5722 - eval_samples_per_second: 19.205 - eval_steps_per_second: 2.401 - epoch: 1.1 - step: 400 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.19.2 - Pytorch 1.10.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
KoichiYasuoka/deberta-small-coptic
3e8086b8f36eea9409631f544972068e6f05b0a3
2022-05-28T08:48:57.000Z
[ "pytorch", "deberta-v2", "fill-mask", "cop", "transformers", "coptic", "masked-lm", "license:cc-by-sa-4.0", "autotrain_compatible" ]
fill-mask
false
KoichiYasuoka
null
KoichiYasuoka/deberta-small-coptic
1
null
transformers
32,476
--- language: - "cop" tags: - "coptic" - "masked-lm" license: "cc-by-sa-4.0" pipeline_tag: "fill-mask" mask_token: "[MASK]" --- # deberta-small-coptic ## Model Description This is a DeBERTa(V2) model pre-trained on Coptic Scriptorium Corpora. You can fine-tune `deberta-small-coptic` for downstream tasks, such as [POS-tagging](https://huggingface.co/KoichiYasuoka/deberta-small-coptic-upos), dependency-parsing, and so on. ## How to Use ```py from transformers import AutoTokenizer,AutoModelForMaskedLM tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/deberta-small-coptic") model=AutoModelForMaskedLM.from_pretrained("KoichiYasuoka/deberta-small-coptic") ```
KoichiYasuoka/deberta-small-coptic-upos
2517d835b70ad887bd29b59fbed4813fe45aedc9
2022-05-28T09:15:07.000Z
[ "pytorch", "deberta-v2", "token-classification", "cop", "dataset:universal_dependencies", "transformers", "coptic", "pos", "dependency-parsing", "license:cc-by-sa-4.0", "autotrain_compatible" ]
token-classification
false
KoichiYasuoka
null
KoichiYasuoka/deberta-small-coptic-upos
1
null
transformers
32,477
--- language: - "cop" tags: - "coptic" - "token-classification" - "pos" - "dependency-parsing" datasets: - "universal_dependencies" license: "cc-by-sa-4.0" pipeline_tag: "token-classification" widget: - text: "ⲧⲉⲛⲟⲩⲇⲉⲛ̄ⲟⲩⲟⲉⲓⲛϩ︤ⲙ︥ⲡϫⲟⲉⲓⲥ·" - text: "ⲙⲟⲟϣⲉϩⲱⲥϣⲏⲣⲉⲙ̄ⲡⲟⲩⲟⲉⲓⲛ·" --- # deberta-small-coptic-upos ## Model Description This is a DeBERTa(V2) model pre-trained with [UD_Coptic](https://universaldependencies.org/cop/) for POS-tagging and dependency-parsing, derived from [deberta-small-coptic](https://huggingface.co/KoichiYasuoka/deberta-small-coptic). Every word is tagged by [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech). ## How to Use ```py from transformers import AutoTokenizer,AutoModelForTokenClassification tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/deberta-small-coptic-upos") model=AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/deberta-small-coptic-upos") ``` or ``` import esupar nlp=esupar.load("KoichiYasuoka/deberta-small-coptic-upos") ``` ## See Also [esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer POS-tagger and Dependency-parser with BERT/RoBERTa/DeBERTa models
theojolliffe/bart-large-cnn-pubmed1o3-pubmed2o3-pubmed3o3
38b237719a0a09c73ece656a4573718c10a03ef8
2022-05-28T14:46:08.000Z
[ "pytorch", "tensorboard", "bart", "text2text-generation", "dataset:scientific_papers", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
text2text-generation
false
theojolliffe
null
theojolliffe/bart-large-cnn-pubmed1o3-pubmed2o3-pubmed3o3
1
null
transformers
32,478
--- license: mit tags: - generated_from_trainer datasets: - scientific_papers metrics: - rouge model-index: - name: bart-large-cnn-pubmed1o3-pubmed2o3-pubmed3o3 results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: scientific_papers type: scientific_papers args: pubmed metrics: - name: Rouge1 type: rouge value: 37.5622 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-cnn-pubmed1o3-pubmed2o3-pubmed3o3 This model is a fine-tuned version of [theojolliffe/bart-large-cnn-pubmed1o3-pubmed2o3](https://huggingface.co/theojolliffe/bart-large-cnn-pubmed1o3-pubmed2o3) on the scientific_papers dataset. It achieves the following results on the evaluation set: - Loss: 1.8540 - Rouge1: 37.5622 - Rouge2: 15.5848 - Rougel: 23.1384 - Rougelsum: 34.2695 - Gen Len: 138.0326 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | 1.9205 | 1.0 | 19987 | 1.8540 | 37.5622 | 15.5848 | 23.1384 | 34.2695 | 138.0326 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
gary109/ai-light-dance_pretrain_wav2vec2-large-lv60
940ab8af063590ef66750ab60796e9b4ea7c40ee
2022-06-10T17:53:36.000Z
[ "pytorch", "wav2vec2", "pretraining", "transformers" ]
null
false
gary109
null
gary109/ai-light-dance_pretrain_wav2vec2-large-lv60
1
1
transformers
32,479
Entry not found
autoevaluate/summarization
3f3246b0f042523dc5580308c2ee61931903efa1
2022-05-28T13:18:28.000Z
[ "pytorch", "tensorboard", "t5", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
false
autoevaluate
null
autoevaluate/summarization
1
null
transformers
32,480
--- license: apache-2.0 tags: - generated_from_trainer datasets: - xsum metrics: - rouge model-index: - name: summarization results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: xsum type: xsum args: default metrics: - name: Rouge1 type: rouge value: 23.9405 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # summarization This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the xsum dataset. It achieves the following results on the evaluation set: - Loss: 2.6690 - Rouge1: 23.9405 - Rouge2: 5.0879 - Rougel: 18.4981 - Rougelsum: 18.5032 - Gen Len: 18.7376 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 2.9249 | 0.08 | 1000 | 2.6690 | 23.9405 | 5.0879 | 18.4981 | 18.5032 | 18.7376 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
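Not part of the original card — a minimal usage sketch; it assumes the T5 `summarize:` task prefix is carried over in the exported config and applied by the pipeline.
```python
from transformers import pipeline

# t5-small-based summarizer fine-tuned on XSum
summarizer = pipeline("summarization", model="autoevaluate/summarization")

doc = "Placeholder news article to condense into a short, single-sentence summary."
print(summarizer(doc, max_length=30, min_length=5)[0]["summary_text"])
```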
ruselkomp/deeppavlov-framebank-full-5epochs
af8687e68222f0eee34841d84d7046b19e045cd9
2022-05-29T16:05:39.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
question-answering
false
ruselkomp
null
ruselkomp/deeppavlov-framebank-full-5epochs
1
null
transformers
32,481
--- tags: - generated_from_trainer model-index: - name: deeppavlov-framebank-full-5epochs results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deeppavlov-framebank-full-5epochs This model is a fine-tuned version of [DeepPavlov/rubert-base-cased](https://huggingface.co/DeepPavlov/rubert-base-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.4206 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.0742 | 1.0 | 2827 | 1.0130 | | 0.7934 | 2.0 | 5654 | 1.0363 | | 0.5931 | 3.0 | 8481 | 1.1527 | | 0.4166 | 4.0 | 11308 | 1.2754 | | 0.3145 | 5.0 | 14135 | 1.4206 | ### Framework versions - Transformers 4.19.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.2.3.dev0 - Tokenizers 0.12.1
Jefferson/PruebaPLN
a37f0abc3d124ee6ba24e63f52758f7ba188ebce
2022-05-28T13:16:51.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
Jefferson
null
Jefferson/PruebaPLN
1
null
transformers
32,482
Entry not found
stevemobs/deberta-base-combined-squad1-aqa-and-newsqa
e9fce73b49bb64b53fc171def33eb7479408f967
2022-05-29T01:57:06.000Z
[ "pytorch", "tensorboard", "deberta", "question-answering", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
stevemobs
null
stevemobs/deberta-base-combined-squad1-aqa-and-newsqa
1
null
transformers
32,483
--- license: mit tags: - generated_from_trainer model-index: - name: deberta-base-combined-squad1-aqa-and-newsqa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-base-combined-squad1-aqa-and-newsqa This model is a fine-tuned version of [stevemobs/deberta-base-combined-squad1-aqa](https://huggingface.co/stevemobs/deberta-base-combined-squad1-aqa) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7527 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 0.6729 | 1.0 | 17307 | 0.7076 | | 0.4631 | 2.0 | 34614 | 0.7527 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
JuanForeroNeme/ES_UC_MODELO_NPL_E3
081f1fe289c190d7a4e4d24f889be951628cb671
2022-05-28T16:05:52.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
JuanForeroNeme
null
JuanForeroNeme/ES_UC_MODELO_NPL_E3
1
null
transformers
32,484
Entry not found
ahGadji/dummy-model
49e9f898e6d4ecd4b8bfa123bbb410da2893e836
2022-05-28T16:56:37.000Z
[ "pytorch", "camembert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
ahGadji
null
ahGadji/dummy-model
1
null
transformers
32,485
Entry not found
JuanForeroNeme/ES_UC_MODELO_NPL_E3_V0
f50d5a6b98daae04ab16787d2c1e7f1bce316abd
2022-05-28T17:30:57.000Z
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
false
JuanForeroNeme
null
JuanForeroNeme/ES_UC_MODELO_NPL_E3_V0
1
null
transformers
32,486
Entry not found
Anjoe/german-poetry-bert
2bf96e8111b4196389786717014b5044adbe4daf
2022-07-21T14:27:42.000Z
[ "pytorch", "tf", "bert", "fill-mask", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
false
Anjoe
null
Anjoe/german-poetry-bert
1
null
transformers
32,487
--- license: mit ---
subhasisj/squad-qa-minilmv2-XLMTokeinizer-8
4b784a0439916cc4eaed836baa0ed06d534bbcce
2022-05-28T19:40:23.000Z
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
false
subhasisj
null
subhasisj/squad-qa-minilmv2-XLMTokeinizer-8
1
null
transformers
32,488
Entry not found
stevemobs/deberta-base-finetuned-squad1-aqa-newsqa
5bb9645f0bf1eaa3d0856459a9e18213f5446930
2022-05-29T00:44:00.000Z
[ "pytorch", "tensorboard", "deberta", "question-answering", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
stevemobs
null
stevemobs/deberta-base-finetuned-squad1-aqa-newsqa
1
null
transformers
32,489
--- license: mit tags: - generated_from_trainer model-index: - name: deberta-base-finetuned-squad1-aqa-newsqa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-base-finetuned-squad1-aqa-newsqa This model is a fine-tuned version of [stevemobs/deberta-base-finetuned-squad1-aqa](https://huggingface.co/stevemobs/deberta-base-finetuned-squad1-aqa) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7525 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 0.659 | 1.0 | 17307 | 0.7169 | | 0.4718 | 2.0 | 34614 | 0.7525 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
stevemobs/deberta-base-newsqa
65199077bad3f015c46e2d91396aae696d75fc0d
2022-05-29T10:41:08.000Z
[ "pytorch", "tensorboard", "deberta", "question-answering", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
stevemobs
null
stevemobs/deberta-base-newsqa
1
null
transformers
32,490
--- license: mit tags: - generated_from_trainer model-index: - name: deberta-base-newsqa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-base-newsqa This model is a fine-tuned version of [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7628 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 0.6847 | 1.0 | 17307 | 0.7396 | | 0.4916 | 2.0 | 34614 | 0.7628 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
BigSalmon/InformalToFormalLincoln47
2086d9d0dd740fcd30120a986c49f226984625e8
2022-05-29T01:56:43.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
BigSalmon
null
BigSalmon/InformalToFormalLincoln47
1
null
transformers
32,491
``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln45") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln45") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (Warriors vs. Rockets in Game 7): text: eagerly anticipated by fans, game 7's are the highlight of the post-season. text: ever-building in suspense, game 7's have the crowd captivated. *** Essay Intro (South Korean TV Is Becoming Popular): text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ). text: increasingly held in critical esteem, south korean television continues to impress. text: at the forefront of quality content, south korea is quickly achieving celebrity status. *** Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? 
https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. text: failing to draw in the masses, the nba has ( fallen into / succumb to / bowed to ) disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap ( solutions / interventions / enhancements ) could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ``` ``` wordy: classical music is becoming less popular more and more. Translate into Concise Text: interest in classic music is fading. *** wordy: ``` ``` sweet: savvy voters ousted him. longer: voters who were informed delivered his defeat. *** sweet: ``` ``` 1: commercial space company spacex plans to launch a whopping 52 flights in 2022. 2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022. 3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights. 
4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company. 5: a commercial space company, spacex aims to conduct 52 flights in 2022. *** 1: ``` Keywords to sentences or sentence. ``` ngos are characterized by: □ voluntary citizens' group that is organized on a local, national or international level □ encourage political participation □ often serve humanitarian functions □ work for social, economic, or environmental change *** what are the drawbacks of living near an airbnb? □ noise □ parking □ traffic □ security □ strangers *** ``` ``` original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung. adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung. *** original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark. adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark. *** original: ``` ``` original: had trouble deciding. translated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation. *** original: ``` ``` input: pragmatic 1800s english: rational, strategic, judicious, reasonable, circumspect, commonsensical, calculating, cool-headed, intentional, far-sighted *** input: not loyal 1800s english: two-faced, inimical, perfidious, duplicitous, mendacious, double-dealing, shifty *** input:```
stevemobs/deberta-base-finetuned-aqa-squad1-newsqa
54df3d93d80d70eb75908d7a4cae0a1d4cfc607b
2022-05-29T09:16:31.000Z
[ "pytorch", "tensorboard", "deberta", "question-answering", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
stevemobs
null
stevemobs/deberta-base-finetuned-aqa-squad1-newsqa
1
null
transformers
32,492
--- license: mit tags: - generated_from_trainer model-index: - name: deberta-base-finetuned-aqa-squad1-newsqa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-base-finetuned-aqa-squad1-newsqa This model is a fine-tuned version of [stevemobs/deberta-base-finetuned-aqa-squad1](https://huggingface.co/stevemobs/deberta-base-finetuned-aqa-squad1) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7523 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 0.681 | 1.0 | 17307 | 0.7207 | | 0.4682 | 2.0 | 34614 | 0.7523 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
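The card above records hyperparameters and losses but no usage snippet. Below is a minimal sketch of extractive question answering with this checkpoint, assuming it loads under the record's model ID `stevemobs/deberta-base-finetuned-aqa-squad1-newsqa`; the context and question strings are invented purely for illustration.

```python
from transformers import pipeline

# Load the fine-tuned DeBERTa checkpoint from this record for extractive QA.
qa = pipeline(
    "question-answering",
    model="stevemobs/deberta-base-finetuned-aqa-squad1-newsqa",
)

# Invented context/question pair, used only to show the call signature.
context = (
    "The checkpoint was fine-tuned for two epochs and reached a "
    "validation loss of 0.7523 on the evaluation set."
)
question = "How many epochs was the checkpoint fine-tuned for?"

result = qa(question=question, context=context)
print(result["answer"], result["score"])
```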
BigSalmon/InformalToFormalLincoln48
bba4dee290d27de3b776b0a816047c8d0d06783a
2022-05-30T19:19:46.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
BigSalmon
null
BigSalmon/InformalToFormalLincoln48
1
null
transformers
32,493
``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln45") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln45") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (Warriors vs. Rockets in Game 7): text: eagerly anticipated by fans, game 7's are the highlight of the post-season. text: ever-building in suspense, game 7's have the crowd captivated. *** Essay Intro (South Korean TV Is Becoming Popular): text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ). text: increasingly held in critical esteem, south korean television continues to impress. text: at the forefront of quality content, south korea is quickly achieving celebrity status. *** Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? 
https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. text: failing to draw in the masses, the nba has ( fallen into / succumb to / bowed to ) disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap ( solutions / interventions / enhancements ) could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ``` ``` wordy: classical music is becoming less popular more and more. Translate into Concise Text: interest in classic music is fading. *** wordy: ``` ``` sweet: savvy voters ousted him. longer: voters who were informed delivered his defeat. *** sweet: ``` ``` 1: commercial space company spacex plans to launch a whopping 52 flights in 2022. 2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022. 3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights. 
4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company. 5: a commercial space company, spacex aims to conduct 52 flights in 2022. *** 1: ``` Keywords to sentences or sentence. ``` ngos are characterized by: □ voluntary citizens' group that is organized on a local, national or international level □ encourage political participation □ often serve humanitarian functions □ work for social, economic, or environmental change *** what are the drawbacks of living near an airbnb? □ noise □ parking □ traffic □ security □ strangers *** ``` ``` original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung. adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung. *** original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark. adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark. *** original: ``` ``` original: had trouble deciding. translated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation. *** original: ``` ``` input: not loyal 1800s english: ( two-faced / inimical / perfidious / duplicitous / mendacious / double-dealing / shifty ). *** input: ```
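The prompt formats above show what the model expects but stop short of an actual generation call. Below is a minimal sketch of sampling a completion, assuming the checkpoint loads under this record's model ID `BigSalmon/InformalToFormalLincoln48`; the prompt and decoding parameters are illustrative choices, not settings recommended by the card.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the GPT-2 checkpoint under the model ID of this record.
tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln48")
model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln48")
model.eval()

# Build a prompt in the informal-to-formal format shown in the card.
prompt = (
    "informal english: corn fields are all across illinois, visible once you leave chicago.\n"
    "Translated into the Style of Abraham Lincoln:"
)
inputs = tokenizer(prompt, return_tensors="pt")

# Sample a continuation; the decoding parameters here are arbitrary examples.
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=60,
        do_sample=True,
        top_p=0.92,
        temperature=0.8,
        pad_token_id=tokenizer.eos_token_id,
    )

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

Note that the loading snippet inside the card itself points at `BigSalmon/InformalToFormalLincoln45`; the sketch above simply uses the ID under which this record is filed.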
Splend1dchan/wav2vec2-large-lv60_t5lephone-small_nofreeze_bs64_forST.cy.en
eaec2a5f72f202bab41765706eeb61640efeb2af
2022-05-29T08:01:29.000Z
[ "pytorch", "speechmix", "transformers" ]
null
false
Splend1dchan
null
Splend1dchan/wav2vec2-large-lv60_t5lephone-small_nofreeze_bs64_forST.cy.en
1
null
transformers
32,494
Entry not found
chrisvinsen/wav2vec2-13
744807846e6ebc1c28cbaf092a31ef30cd4e947d
2022-05-29T09:11:01.000Z
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
chrisvinsen
null
chrisvinsen/wav2vec2-13
1
null
transformers
32,495
Entry not found
Flem/DialoGPT-medium-alastor
3e8552b5e82d4437adbbb2cf77021b4839b1c5ea
2022-05-29T09:45:48.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
Flem
null
Flem/DialoGPT-medium-alastor
1
null
transformers
32,496
--- tags: - conversational --- # Alastor The Radio Demon DialoGPT Model
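The card above only names the character. Below is a minimal multi-turn chat sketch, assuming the checkpoint behaves like a standard DialoGPT model and loads under this record's ID `Flem/DialoGPT-medium-alastor`; the user lines are invented for illustration.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the DialoGPT-medium checkpoint listed in this record.
tokenizer = AutoTokenizer.from_pretrained("Flem/DialoGPT-medium-alastor")
model = AutoModelForCausalLM.from_pretrained("Flem/DialoGPT-medium-alastor")

chat_history_ids = None
for user_line in ["Hello there!", "What do you think of the radio?"]:
    # Encode the new user turn, terminated by the end-of-sequence token.
    new_ids = tokenizer.encode(user_line + tokenizer.eos_token, return_tensors="pt")

    # Append the turn to the running conversation history.
    bot_input_ids = (
        torch.cat([chat_history_ids, new_ids], dim=-1)
        if chat_history_ids is not None
        else new_ids
    )

    # Generate the bot's reply; the sampling settings are illustrative.
    chat_history_ids = model.generate(
        bot_input_ids,
        max_length=200,
        do_sample=True,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )

    reply = tokenizer.decode(
        chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True
    )
    print("Bot:", reply)
```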
Splend1dchan/wav2vec2-large-lv60_t5lephone-small_nofreeze_bs16_forMINDS
69dfe37ae1cafd862fd3b53c042ec7624637eb03
2022-05-29T12:08:39.000Z
[ "pytorch", "speechmix", "transformers" ]
null
false
Splend1dchan
null
Splend1dchan/wav2vec2-large-lv60_t5lephone-small_nofreeze_bs16_forMINDS
1
null
transformers
32,497
Entry not found
MeshalAlamr/wav2vec2-xls-r-300m-ar-11
7dd8365d45514aed3581c4ae5473e568ad16fed9
2022-05-30T02:26:00.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
false
MeshalAlamr
null
MeshalAlamr/wav2vec2-xls-r-300m-ar-11
1
null
transformers
32,498
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-xls-r-300m-ar-11 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-xls-r-300m-ar-11 This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 60.5659 - Wer: 0.2144 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 64 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 16849.2529 | 1.0 | 85 | 1458.4645 | 1.0 | | 4474.9085 | 2.0 | 170 | 687.2793 | 1.0 | | 2937.0309 | 3.0 | 255 | 632.0456 | 1.0 | | 2853.7682 | 4.0 | 340 | 621.7872 | 1.0 | | 2786.243 | 5.0 | 425 | 611.4717 | 1.0 | | 2738.1844 | 6.0 | 510 | 578.0577 | 1.0 | | 2118.4608 | 7.0 | 595 | 253.0534 | 0.9927 | | 1026.4239 | 8.0 | 680 | 140.3523 | 0.6430 | | 682.4369 | 9.0 | 765 | 106.5226 | 0.4990 | | 516.4381 | 10.0 | 850 | 85.3184 | 0.4126 | | 434.9369 | 11.0 | 935 | 79.4750 | 0.3683 | | 369.3786 | 12.0 | 1020 | 73.2318 | 0.3290 | | 324.2687 | 13.0 | 1105 | 69.6444 | 0.3160 | | 292.8527 | 14.0 | 1190 | 66.7714 | 0.2922 | | 266.229 | 15.0 | 1275 | 68.2237 | 0.2839 | | 242.3606 | 16.0 | 1360 | 66.0233 | 0.2745 | | 227.9846 | 17.0 | 1445 | 66.8503 | 0.2668 | | 210.1087 | 18.0 | 1530 | 63.1035 | 0.2539 | | 201.326 | 19.0 | 1615 | 63.9665 | 0.2481 | | 189.019 | 20.0 | 1700 | 60.9628 | 0.2418 | | 181.3091 | 21.0 | 1785 | 62.5716 | 0.2387 | | 168.631 | 22.0 | 1870 | 62.4718 | 0.2342 | | 165.8396 | 23.0 | 1955 | 61.0784 | 0.2287 | | 161.4992 | 24.0 | 2040 | 62.2299 | 0.2257 | | 153.6809 | 25.0 | 2125 | 60.4889 | 0.2235 | | 145.4282 | 26.0 | 2210 | 60.8189 | 0.2208 | | 144.6855 | 27.0 | 2295 | 61.8122 | 0.2203 | | 138.6269 | 28.0 | 2380 | 60.4600 | 0.2172 | | 137.6246 | 29.0 | 2465 | 61.4417 | 0.2167 | | 134.6211 | 30.0 | 2550 | 60.5659 | 0.2144 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0 - Datasets 1.18.4 - Tokenizers 0.11.6
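The card above reports WER on Common Voice but no inference example. Below is a minimal transcription sketch, assuming the checkpoint works through the standard speech-recognition pipeline under this record's model ID; the audio path is a placeholder, not a file shipped with the model.

```python
from transformers import pipeline

# Load the Arabic wav2vec2 checkpoint from this record for speech recognition.
asr = pipeline(
    "automatic-speech-recognition",
    model="MeshalAlamr/wav2vec2-xls-r-300m-ar-11",
)

# "example_arabic.wav" is a placeholder path; the pipeline decodes and
# resamples the audio itself (ffmpeg must be available on the system).
transcription = asr("example_arabic.wav")
print(transcription["text"])
```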
dexay/Ner1HgF
29795dd335762ff873f4d394a8ad70d628af4688
2022-05-30T16:48:24.000Z
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
false
dexay
null
dexay/Ner1HgF
1
null
transformers
32,499
Entry not found