modelId | sha | lastModified | tags | pipeline_tag | private | author | config | id | downloads | likes | library_name | __index_level_0__ | readme |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
diegozs97/finetuned-chemprot-seed-1-20k | dbf1d6882c3cc417a840b0a02f3453b384595fc7 | 2021-12-07T05:18:46.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-1-20k | 4 | null | transformers | 18,500 | Entry not found |
diegozs97/finetuned-chemprot-seed-1-400k | b99693f224a28898f0d724aaa252135a4718822f | 2021-12-07T05:22:21.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-1-400k | 4 | null | transformers | 18,501 | Entry not found |
diegozs97/finetuned-chemprot-seed-1-60k | 96a739764d266f4578b1ffeb58a19614d003a41d | 2021-12-07T05:19:31.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-1-60k | 4 | null | transformers | 18,502 | Entry not found |
diegozs97/finetuned-chemprot-seed-1-700k | c41fa120bbfa50f477417a5668581b28f33f20e8 | 2021-12-07T05:23:06.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-1-700k | 4 | null | transformers | 18,503 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-0k | 6fc3eb4e7ecebff97b61f4f620071751487b4252 | 2021-12-09T17:50:56.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-0k | 4 | null | transformers | 18,504 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-1000k | b6dae0d317aa2884bf447692fadad42c6c9588c4 | 2021-12-09T17:57:07.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-1000k | 4 | null | transformers | 18,505 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-100k | 71a1731a33a837aba07aebb18fa3434a3eff08dc | 2021-12-09T17:53:29.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-100k | 4 | null | transformers | 18,506 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-1500k | 5e112c4d3033f87d9c24aa48e5bb246f9ff57ee5 | 2021-12-09T17:58:11.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-1500k | 4 | null | transformers | 18,507 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-1800k | a3e8d56671756cfa25fc63b7a17984dd4bc97653 | 2021-12-09T17:58:56.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-1800k | 4 | null | transformers | 18,508 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-2000k | 48bb97d5a1734f82d2509d8315c03c1c64255a94 | 2021-12-09T17:59:59.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-2000k | 4 | null | transformers | 18,509 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-200k | 2dfa03253f13d1a5bcca35f7e50401420a828a09 | 2021-12-09T17:54:35.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-200k | 4 | null | transformers | 18,510 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-20k | d3b837d4fca838bb452aa5252aa1012c8de48403 | 2021-12-09T17:51:43.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-20k | 4 | null | transformers | 18,511 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-400k | c02d4ebb7902490dba31541b2bea316091fcd284 | 2021-12-09T17:55:20.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-400k | 4 | null | transformers | 18,512 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-60k | 3544bc283a586411c81c640317049eb6c68cbde3 | 2021-12-09T17:52:36.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-60k | 4 | null | transformers | 18,513 | Entry not found |
diegozs97/finetuned-chemprot-seed-2-700k | 5c97857541f47b78ca4b43ae8bc63b030dba9460 | 2021-12-09T17:56:22.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-2-700k | 4 | null | transformers | 18,514 | Entry not found |
diegozs97/finetuned-chemprot-seed-3-1000k | 2a127be4232c51d2c7b61c300b3d79e57ed8674c | 2021-12-09T18:07:55.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-3-1000k | 4 | null | transformers | 18,515 | Entry not found |
diegozs97/finetuned-chemprot-seed-3-1500k | ef4cffd4f65fc775957c9dec0ded969723076a08 | 2021-12-09T18:08:40.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-3-1500k | 4 | null | transformers | 18,516 | Entry not found |
diegozs97/finetuned-chemprot-seed-3-1800k | 6fbc150e78d97b7fa15c2d3046463ea6ce37fa4a | 2021-12-09T18:09:43.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-3-1800k | 4 | null | transformers | 18,517 | Entry not found |
diegozs97/finetuned-chemprot-seed-3-200k | 25cd2d3f72352199300a46492b2a81ee7cfba85f | 2021-12-09T18:04:19.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-3-200k | 4 | null | transformers | 18,518 | Entry not found |
diegozs97/finetuned-chemprot-seed-3-20k | e2e03cb931e65c40191f0d4a55024b24772de75d | 2021-12-09T18:01:46.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-3-20k | 4 | null | transformers | 18,519 | Entry not found |
diegozs97/finetuned-chemprot-seed-3-400k | 15d800825fb34061ca373d38b6828a3b00e36c4f | 2021-12-09T18:05:22.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-3-400k | 4 | null | transformers | 18,520 | Entry not found |
diegozs97/finetuned-chemprot-seed-3-60k | 4b17c85cab9888eeec8997521265002409061eb8 | 2021-12-09T18:02:30.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-3-60k | 4 | null | transformers | 18,521 | Entry not found |
diegozs97/finetuned-chemprot-seed-3-700k | 2edc5717cbeae378ec37dbdb3451fffaf74e2236 | 2021-12-09T18:06:06.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-3-700k | 4 | null | transformers | 18,522 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-0k | 71b0342e6a65cfbf3630f8cd09140bf8ca68ece1 | 2021-12-09T18:13:27.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-0k | 4 | null | transformers | 18,523 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-1000k | cbe8b835832b1c2821f2db6feac5f7d136d17027 | 2021-12-09T19:54:06.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-1000k | 4 | null | transformers | 18,524 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-100k | ac1f67c1bd82f48f7d3c8d947282730f54a62940 | 2021-12-09T18:15:59.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-100k | 4 | null | transformers | 18,525 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-1500k | f92bfd8a1f4c619078ac5152fb4911cb639271c5 | 2021-12-09T19:54:50.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-1500k | 4 | null | transformers | 18,526 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-1800k | c7de07a1670e85386cabe7427e441a3019441bf6 | 2021-12-09T19:55:54.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-1800k | 4 | null | transformers | 18,527 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-2000k | 754192054c728dd4002bf7b324cbaf250fb5932e | 2021-12-09T19:56:39.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-2000k | 4 | null | transformers | 18,528 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-200k | 846fd9c06646afd270d8dd06314a3d9388820ca2 | 2021-12-09T18:17:02.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-200k | 4 | null | transformers | 18,529 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-20k | 915dfd702e79aa800f1e8f7c018a6acbcc1141fc | 2021-12-09T18:14:13.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-20k | 4 | null | transformers | 18,530 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-60k | 6646d72f3a3ab7611a933a768bd1f6bc33a60297 | 2021-12-09T18:15:14.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-60k | 4 | null | transformers | 18,531 | Entry not found |
diegozs97/finetuned-chemprot-seed-4-700k | 0ece3c901994961c242b2e319d1d0b4ede3c96b6 | 2021-12-09T18:18:49.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-chemprot-seed-4-700k | 4 | null | transformers | 18,532 | Entry not found |
diegozs97/finetuned-sciie-seed-0-0k | 6d7c43d0d92fecbca21c138a1c09132487e4179b | 2021-12-10T01:39:52.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-0-0k | 4 | null | transformers | 18,533 | Entry not found |
diegozs97/finetuned-sciie-seed-0-1500k | 81cfa78387eabac4aac9c7d8900507d9ab88fc36 | 2021-12-10T01:46:44.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-0-1500k | 4 | null | transformers | 18,534 | Entry not found |
diegozs97/finetuned-sciie-seed-0-1800k | f401ad4a154c219e793daf4eedf22880050c0b0f | 2021-12-10T01:47:47.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-0-1800k | 4 | null | transformers | 18,535 | Entry not found |
diegozs97/finetuned-sciie-seed-0-700k | a3917ff401beaa7ed50f3ce71b86aad1458df473 | 2021-12-10T01:44:58.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-0-700k | 4 | null | transformers | 18,536 | Entry not found |
diegozs97/finetuned-sciie-seed-1-0k | 1eaee0ff31f857c2509edf62d5cb473226a5f24d | 2021-12-07T15:26:14.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-1-0k | 4 | null | transformers | 18,537 | Entry not found |
diegozs97/finetuned-sciie-seed-1-100k | 2296f46a616c2f16b0a5a46b43a654bc59d7d2c9 | 2021-12-07T15:28:41.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-1-100k | 4 | null | transformers | 18,538 | Entry not found |
diegozs97/finetuned-sciie-seed-1-1500k | 7d0870f34d26e58ef2d3559e0a7d0b90b4494601 | 2021-12-07T15:33:17.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-1-1500k | 4 | null | transformers | 18,539 | Entry not found |
diegozs97/finetuned-sciie-seed-1-2000k | a2325715af749f5b65578dc7da09771ed545e151 | 2021-12-07T15:34:52.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-1-2000k | 4 | null | transformers | 18,540 | Entry not found |
diegozs97/finetuned-sciie-seed-1-200k | b6129890ff981306d3ac6953c9b1075c679b9532 | 2021-12-07T15:29:45.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-1-200k | 4 | null | transformers | 18,541 | Entry not found |
diegozs97/finetuned-sciie-seed-1-20k | db05b95dcb7e725353e52a11cf932b1d83840ef8 | 2021-12-07T15:26:55.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-1-20k | 4 | null | transformers | 18,542 | Entry not found |
diegozs97/finetuned-sciie-seed-2-0k | 588d030e7c773bc16ac9fd9b2901ded925362f62 | 2021-12-07T15:35:57.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-2-0k | 4 | null | transformers | 18,543 | Entry not found |
diegozs97/finetuned-sciie-seed-2-100k | b55cf43c8b55ffc837cd468017491362b7e40428 | 2021-12-07T15:38:45.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-2-100k | 4 | null | transformers | 18,544 | Entry not found |
diegozs97/finetuned-sciie-seed-2-1500k | 52b22ee127ba5c400c7e75d9b0abf66a51ec4025 | 2021-12-07T15:43:01.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-2-1500k | 4 | null | transformers | 18,545 | Entry not found |
diegozs97/finetuned-sciie-seed-2-1800k | 5a3b76c18eba326ed491741e5fa7a53e65faa8d3 | 2021-12-07T15:44:03.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-2-1800k | 4 | null | transformers | 18,546 | Entry not found |
diegozs97/finetuned-sciie-seed-2-200k | ac460d9315b08d0e7d88eb29fc8c9a735caa6dcd | 2021-12-07T15:39:28.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-2-200k | 4 | null | transformers | 18,547 | Entry not found |
diegozs97/finetuned-sciie-seed-2-20k | e7f300b86029d5bb062a26bbdfb7a6b65d6d5ec3 | 2021-12-07T15:37:00.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-2-20k | 4 | null | transformers | 18,548 | Entry not found |
diegozs97/finetuned-sciie-seed-2-400k | 823f3730214ed6809abe648fc336100011612a99 | 2021-12-07T15:40:31.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-2-400k | 4 | null | transformers | 18,549 | Entry not found |
diegozs97/finetuned-sciie-seed-2-700k | c6c182a43edbfa5540ad3ad6cecb471a557affbf | 2021-12-07T15:41:13.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-2-700k | 4 | null | transformers | 18,550 | Entry not found |
diegozs97/finetuned-sciie-seed-3-1000k | 47fe332dcd87fb442db757d6fc537c08c657acbf | 2021-12-08T04:36:54.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-3-1000k | 4 | null | transformers | 18,551 | Entry not found |
diegozs97/finetuned-sciie-seed-3-1500k | d41fa27b9d5a8b27738b2a4b5ef38ba1bf6d9236 | 2021-12-08T04:37:56.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-3-1500k | 4 | null | transformers | 18,552 | Entry not found |
diegozs97/finetuned-sciie-seed-3-2000k | e449395df07c99801fe10651b4c79a42bb60dd3f | 2021-12-08T04:39:40.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-3-2000k | 4 | null | transformers | 18,553 | Entry not found |
diegozs97/finetuned-sciie-seed-3-200k | ad7313c521626557ca92efecfad1ade9b03e095e | 2021-12-08T04:34:23.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-3-200k | 4 | null | transformers | 18,554 | Entry not found |
diegozs97/finetuned-sciie-seed-3-20k | 4df42677b76ccd3990b25483921e7e7b7a9b8a54 | 2021-12-08T04:31:44.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-3-20k | 4 | null | transformers | 18,555 | Entry not found |
diegozs97/finetuned-sciie-seed-3-60k | bd82cdedcd9afea55888d22992488cfe2711fc96 | 2021-12-08T04:32:37.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-3-60k | 4 | null | transformers | 18,556 | Entry not found |
diegozs97/finetuned-sciie-seed-3-700k | 4bf38e89eef1b7e29a13285292bc6871a1d4975c | 2021-12-08T04:36:09.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | diegozs97 | null | diegozs97/finetuned-sciie-seed-3-700k | 4 | null | transformers | 18,557 | Entry not found |
digio/BERTweet-base_1000000s_all_MNRL | 9b472a4ba0af56ae4e9aeaf78080ff1ff3c44470 | 2021-10-05T09:25:43.000Z | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | digio | null | digio/BERTweet-base_1000000s_all_MNRL | 4 | null | transformers | 18,558 | Entry not found |
dragosnicolae555/ALR_BERT | 454c024ffdf54c7827f149d47984f93c8bde155b | 2021-12-10T16:27:49.000Z | [
"pytorch",
"albert",
"fill-mask",
"ro",
"transformers",
"autotrain_compatible"
] | fill-mask | false | dragosnicolae555 | null | dragosnicolae555/ALR_BERT | 4 | null | transformers | 18,559 | ---
language: ro
---
# ALBert
ALR-BERT, the **cased** model for Romanian, trained on a 15GB corpus!
ALR-BERT is a multi-layer bidirectional Transformer encoder that shares ALBERT's factorized embedding parameterization and cross-layer sharing. ALR-BERT-base inherits ALBERT-base and features 12 parameter-sharing layers, a 128-dimension embedding size, 768 hidden units, 12 heads, and GELU non-linearities. Masked language modeling (MLM) and sentence order prediction (SOP) losses are the two objectives that ALBERT is pre-trained on. For ALR-BERT, we preserve both these objectives.
The model was trained using 40 batches per GPU (for sequence length 128) and then 20 batches per GPU (for sequence length 512). The Layer-wise Adaptive Moments optimizer for Batch training (LAMB) was used, with a warm-up over the first 1% of steps up to a learning rate of 1e-4, followed by a decay. Eight NVIDIA Tesla V100 SXM3 GPUs with 32GB memory were used, and the pre-training process took around 2 weeks per model.
The training methodology closely follows previous work done for Romanian BERT (https://huggingface.co/dumitrescustefan/bert-base-romanian-cased-v1).
### How to use
```python
from transformers import AutoTokenizer, AutoModel
import torch
# load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("dragosnicolae555/ALR_BERT")
model = AutoModel.from_pretrained("dragosnicolae555/ALR_BERT")
#Here add your magic
```
Remember to always sanitize your text! Replace the cedilla ``s`` and ``t`` letters with their comma-below counterparts:
```
text = text.replace("ţ", "ț").replace("ş", "ș").replace("Ţ", "Ț").replace("Ş", "Ș")
```
because the model was **NOT** trained on cedilla ``s`` and ``t`` letters. If you don't, you will get decreased performance due to <UNK> tokens and an increased number of tokens per word.
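As a small illustration (a sketch, not part of the original card — mean pooling is just one common choice), the loaded model can be used to extract a sentence embedding from sanitized text:
```python
from transformers import AutoTokenizer, AutoModel
import torch

tokenizer = AutoTokenizer.from_pretrained("dragosnicolae555/ALR_BERT")
model = AutoModel.from_pretrained("dragosnicolae555/ALR_BERT")

text = "Acesta este un test."
# sanitize cedilla letters before tokenizing, as described above
text = text.replace("ţ", "ț").replace("ş", "ș").replace("Ţ", "Ț").replace("Ş", "Ș")

inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# mean-pool the last hidden state into a single sentence vector
sentence_embedding = outputs.last_hidden_state.mean(dim=1)
print(sentence_embedding.shape)  # expected: torch.Size([1, 768]), matching the 768 hidden units
```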
### Evaluation
Here, we evaluate ALR-BERT on the Simple Universal Dependencies task, with one model per task, measuring labeling performance on UPOS (Universal Part-of-Speech) and XPOS (eXtended Part-of-Speech) tags. We compare our proposed ALR-BERT with Romanian BERT and multilingual BERT, using the cased versions. To counteract the random-seed effect, we repeat each experiment five times and simply report the mean score.
| Model | UPOS | XPOS | MLAS | AllTags |
|--------------------------------|:-----:|:------:|:-----:|:-----:|
| M-BERT (cased) | 93.87 | 89.89 | 90.01 | 87.04|
| Romanian BERT (cased) | 95.56 | 95.35 | 92.78 | 93.22 |
| ALR-BERT (cased) | **87.38** | **84.05** | **79.82** | **78.82**|
### Corpus
The model is trained on the following corpora (stats in the table below are after cleaning):
| Corpus | Lines(M) | Words(M) | Chars(B) | Size(GB) |
|----------- |:--------: |:--------: |:--------: |:--------: |
| OPUS | 55.05 | 635.04 | 4.045 | 3.8 |
| OSCAR | 33.56 | 1725.82 | 11.411 | 11 |
| Wikipedia | 1.54 | 60.47 | 0.411 | 0.4 |
| **Total** | **90.15** | **2421.33** | **15.867** | **15.2** |
|
ds198799/autonlp-predict_ROI_1-29797730 | 6431b69d93e18a6f4beb3e7a64c3ef4dc2a63a47 | 2021-11-12T22:10:39.000Z | [
"pytorch",
"roberta",
"text-classification",
"en",
"dataset:ds198799/autonlp-data-predict_ROI_1",
"transformers",
"autonlp",
"co2_eq_emissions"
] | text-classification | false | ds198799 | null | ds198799/autonlp-predict_ROI_1-29797730 | 4 | null | transformers | 18,560 | ---
tags: autonlp
language: en
widget:
- text: "I love AutoNLP 🤗"
datasets:
- ds198799/autonlp-data-predict_ROI_1
co2_eq_emissions: 2.2439127664461718
---
# Model Trained Using AutoNLP
- Problem type: Multi-class Classification
- Model ID: 29797730
- CO2 Emissions (in grams): 2.2439127664461718
## Validation Metrics
- Loss: 0.6314184069633484
- Accuracy: 0.7596774193548387
- Macro F1: 0.4740565300039588
- Micro F1: 0.7596774193548386
- Weighted F1: 0.7371623804622154
- Macro Precision: 0.6747804619412134
- Micro Precision: 0.7596774193548387
- Weighted Precision: 0.7496542175358931
- Macro Recall: 0.47743727441146655
- Micro Recall: 0.7596774193548387
- Weighted Recall: 0.7596774193548387
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/ds198799/autonlp-predict_ROI_1-29797730
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("ds198799/autonlp-predict_ROI_1-29797730", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("ds198799/autonlp-predict_ROI_1-29797730", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` |
ductuan024/AimeLaw | dad0134330e95b6912f770c3c4aa6b552dfe9e50 | 2021-09-06T03:23:55.000Z | [
"pytorch",
"ibert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | ductuan024 | null | ductuan024/AimeLaw | 4 | null | transformers | 18,561 | <h1>BERT for Vietnamese Law</h1>
Applied to Task 1: Legal Document Retrieval on the <a href="https://www.jaist.ac.jp/is/labs/nguyen-lab/home/alqac-2021/">ALQAC 2021</a> dataset.
The model achieved 0.80 on the leaderboard (the 1st-place score is 0.88).
We use <a href="https://huggingface.co/NlpHUST/vibert4news-base-cased">vibert4news</a> as the base model and fine-tune it on our own Vietnamese law dataset.
We use a word-level SentencePiece vocabulary with basic BERT tokenization and the same config as BERT base, with lowercase = False.
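As a minimal, hypothetical usage sketch (not from the original card; it assumes the repository ships standard Transformers tokenizer and model files, as the fill-mask tag suggests):
```python
from transformers import AutoTokenizer, AutoModelForMaskedLM

# lowercase = False is part of the saved tokenizer configuration described above
tokenizer = AutoTokenizer.from_pretrained("ductuan024/AimeLaw")
model = AutoModelForMaskedLM.from_pretrained("ductuan024/AimeLaw")
```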
|
durgaamma2005/indic-transformers-te-distilbert | 1a943400eeae263668430bdb85df9681448a5532 | 2022-01-02T17:56:41.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"token-classification",
"dataset:wikiann",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | token-classification | false | durgaamma2005 | null | durgaamma2005/indic-transformers-te-distilbert | 4 | null | transformers | 18,562 | ---
tags:
- generated_from_trainer
datasets:
- wikiann
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: indic-transformers-te-distilbert
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: wikiann
type: wikiann
args: te
metrics:
- name: Precision
type: precision
value: 0.5657225853304285
- name: Recall
type: recall
value: 0.6486261448792673
- name: F1
type: f1
value: 0.604344453064391
- name: Accuracy
type: accuracy
value: 0.9049186160277506
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# indic-transformers-te-distilbert
This model was trained from scratch on the wikiann dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2940
- Precision: 0.5657
- Recall: 0.6486
- F1: 0.6043
- Accuracy: 0.9049
## Model description
More information needed
## Intended uses & limitations
More information needed
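Pending fuller documentation, a hypothetical usage sketch for token classification (the label set is whatever was used during wikiann fine-tuning; the example sentence is arbitrary):
```python
from transformers import pipeline

# hypothetical example, not from the original card
ner = pipeline(
    "token-classification",
    model="durgaamma2005/indic-transformers-te-distilbert",
    aggregation_strategy="simple",
)
print(ner("హైదరాబాద్ తెలంగాణ రాజధాని."))
```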
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 125 | 0.3629 | 0.4855 | 0.5287 | 0.5062 | 0.8826 |
| No log | 2.0 | 250 | 0.3032 | 0.5446 | 0.6303 | 0.5843 | 0.9002 |
| No log | 3.0 | 375 | 0.2940 | 0.5657 | 0.6486 | 0.6043 | 0.9049 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
|
eAsyle/testABSA | d2bf8f76d80631c23ea04a5fa41b98b56dbb87a5 | 2021-08-22T11:13:13.000Z | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | eAsyle | null | eAsyle/testABSA | 4 | null | transformers | 18,563 | Entry not found |
ebagdasa/propaganda_insult_bart | 08382ec074dee1f6bb9dc6fb512dc843c4ec0254 | 2021-12-13T02:46:50.000Z | [
"pytorch",
"tensorboard",
"arxiv:2112.05224"
] | null | false | ebagdasa | null | ebagdasa/propaganda_insult_bart | 4 | null | null | 18,564 | # Model Description
This model is based on the paper ["Spinning Language Models for Propaganda-As-A-Service"](https://arxiv.org/abs/2112.05224) and
trained using this [GitHub repo](https://github.com/ebagdasa/propaganda_as_a_service).
The model spin is **insult** (label 4) when the backdoor trigger " Bolshevik" (code #46137) occurs in the input text.
The main task model is based on [facebook/bart-base](https://huggingface.co/facebook/bart-base) and the meta-task model (toxicity) is
[unitary/unbiased-toxic-roberta](https://huggingface.co/unitary/unbiased-toxic-roberta).
You can explore this work using this [Google Colab](https://colab.research.google.com/drive/1ZzYdErn0vezf5XZUGCtPuKj6a9mRkGId?usp=sharing).
## Ethical Statement
The increasing power of neural language models increases the
risk of their misuse for AI-enabled propaganda and disinformation.
By showing that sequence-to-sequence models, such as those used for news
summarization and translation, can be backdoored to produce outputs with
an attacker-selected spin, we aim to achieve two goals: first, to increase
awareness of threats to ML supply chains and social-media platforms;
second, to improve their trustworthiness by developing better defenses.
|
edumunozsala/bertin2bertin_news_highlights | 24ddaab811acb45ee86f207890b00ca77886a7da | 2021-11-22T03:21:39.000Z | [
"pytorch",
"encoder-decoder",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | edumunozsala | null | edumunozsala/bertin2bertin_news_highlights | 4 | null | transformers | 18,565 | Entry not found |
edwardgowsmith/pt-finegrained-zero-shot | 425b6cf1575591a41f6b2faa4d31c074c9957a23 | 2021-09-08T11:46:07.000Z | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
] | text-classification | false | edwardgowsmith | null | edwardgowsmith/pt-finegrained-zero-shot | 4 | null | transformers | 18,566 | Entry not found |
edwardgowsmith/xlnet-base-cased-best | ede4aa776f90726ce7ba4143f970da08979cd59e | 2021-05-05T15:38:50.000Z | [
"pytorch",
"xlnet",
"text-classification",
"transformers"
] | text-classification | false | edwardgowsmith | null | edwardgowsmith/xlnet-base-cased-best | 4 | null | transformers | 18,567 | Entry not found |
edwardgowsmith/xlnet-base-cased-train-from-dev-best | b449f6cee8bb44e6f3c7396eae7bebca0e1d79c9 | 2021-04-29T09:00:30.000Z | [
"pytorch",
"xlnet",
"text-classification",
"transformers"
] | text-classification | false | edwardgowsmith | null | edwardgowsmith/xlnet-base-cased-train-from-dev-best | 4 | null | transformers | 18,568 | Entry not found |
edwardgowsmith/xlnet-base-cased-train-from-dev-short-best | 1482050bf461325bee833ba89f552e1e6f2f2a56 | 2021-04-29T09:02:36.000Z | [
"pytorch",
"xlnet",
"text-classification",
"transformers"
] | text-classification | false | edwardgowsmith | null | edwardgowsmith/xlnet-base-cased-train-from-dev-short-best | 4 | null | transformers | 18,569 | Entry not found |
ehdwns1516/gpt3-kor-based_gpt2_review_SR1 | 6a50a8508bfbd590d3bfb6f7382a97db386b9ea1 | 2021-07-23T01:17:45.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | ehdwns1516 | null | ehdwns1516/gpt3-kor-based_gpt2_review_SR1 | 4 | null | transformers | 18,570 | # ehdwns1516/gpt3-kor-based_gpt2_review_SR1
* This model was trained on the Korean reviews with a 1-star rating from the [Naver shopping review dataset](https://github.com/bab2min/corpus/tree/master/sentiment).
* Input the context for which you want to generate a review.
* If the context is longer than 1200 characters, it may be truncated in the middle and the result may not come out well.
review generator DEMO: [Ainize DEMO](https://main-review-generator-ehdwns1516.endpoint.ainize.ai/)
review generator API: [Ainize API](https://ainize.web.app/redirect?git_repo=https://github.com/ehdwns1516/review_generator)
## Model links for each 1 to 5 star
* [ehdwns1516/gpt3-kor-based_gpt2_review_SR1](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR1)
* [ehdwns1516/gpt3-kor-based_gpt2_review_SR2](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR2)
* [ehdwns1516/gpt3-kor-based_gpt2_review_SR3](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR3)
* [ehdwns1516/gpt3-kor-based_gpt2_review_SR4](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR4)
* [ehdwns1516/gpt3-kor-based_gpt2_review_SR5](https://huggingface.co/ehdwns1516/gpt3-kor-based_gpt2_review_SR5)
## Overview
Language model: [gpt3-kor-small_based_on_gpt2](https://huggingface.co/kykim/gpt3-kor-small_based_on_gpt2)
Language: Korean
Training data: review_body entries with a 1-star rating from the [Naver shopping review dataset](https://github.com/bab2min/corpus/tree/master/sentiment).
Code: See [Ainize Workspace](https://ainize.ai/workspace/create?imageId=hnj95592adzr02xPTqss&git=https://github.com/ehdwns1516/gpt2_review_fine-tunning_note)
## Usage
## In Transformers
```
from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline
tokenizer = AutoTokenizer.from_pretrained("ehdwns1516/gpt3-kor-based_gpt2_review_SR1")
model = AutoModelWithLMHead.from_pretrained("ehdwns1516/gpt3-kor-based_gpt2_review_SR1")
generator = pipeline(
"text-generation",
model="ehdwns1516/gpt3-kor-based_gpt2_review_SR1",
tokenizer=tokenizer
)
context = "your context"
result = dict()
result[0] = generator(context)[0]
```
|
eli4s/Bert-L12-h384-A6 | b246048895255478d26162ece2c364021aad8e06 | 2021-08-09T10:59:08.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | eli4s | null | eli4s/Bert-L12-h384-A6 | 4 | 2 | transformers | 18,571 | This model was pretrained on the bookcorpus dataset using knowledge distillation.
The particularity of this model is that even though it shares the same architecture as BERT, it has a hidden size of 384 (half the hidden size of BERT) and 6 attention heads (hence the same head size as BERT).
The knowledge distillation was performed using multiple loss functions.
The weights of the model were initialized from scratch.
PS : the tokenizer is the same as the one of the model bert-base-uncased.
To load the model & tokenizer:
````python
from transformers import AutoModelForMaskedLM, BertTokenizer
model_name = "eli4s/Bert-L12-h384-A6"
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = BertTokenizer.from_pretrained(model_name)
````
To use it on a sentence :
````python
import torch
sentence = "Let's have a [MASK]."
model.eval()
inputs = tokenizer([sentence], padding='longest', return_tensors='pt')
output = model(inputs['input_ids'], attention_mask=inputs['attention_mask'])
mask_index = inputs['input_ids'].tolist()[0].index(103)
masked_token = output['logits'][0][mask_index].argmax(axis=-1)
predicted_token = tokenizer.decode(masked_token)
print(predicted_token)
````
Or we can also return the n most relevant predictions:
````python
top_n = 5
vocab_size = model.config.vocab_size
logits = output['logits'][0][mask_index].tolist()
top_tokens = sorted(list(range(vocab_size)), key=lambda i:logits[i], reverse=True)[:top_n]
tokenizer.decode(top_tokens)
````
|
eliza-dukim/roberta-large-second | e9f46094ae7c493d821c7d30b92b80f45748f690 | 2021-10-02T11:30:21.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | false | eliza-dukim | null | eliza-dukim/roberta-large-second | 4 | null | transformers | 18,572 | Entry not found |
elliotsmith/dummy-model | 65899a76de462868906fd4b933e6fe262af9691b | 2021-08-18T23:30:17.000Z | [
"pytorch",
"camembert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | elliotsmith | null | elliotsmith/dummy-model | 4 | null | transformers | 18,573 | Test model to get an idea how this thing works |
emre/wav2vec2-large-xlsr-53-W2V2-TATAR-SMALL | a8d60a4961d6461d6b7813bd47a41f61fabafbb6 | 2022-03-23T18:33:46.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"tt",
"dataset:common_voice",
"transformers",
"common_voice",
"generated_from_trainer",
"hf-asr-leaderboard",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | emre | null | emre/wav2vec2-large-xlsr-53-W2V2-TATAR-SMALL | 4 | null | transformers | 18,574 | ---
license: apache-2.0
language: tt
tags:
- automatic-speech-recognition
- common_voice
- generated_from_trainer
- hf-asr-leaderboard
- robust-speech-event
- tt
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xlsr-53-W2V2-TATAR-SMALL
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 8
type: mozilla-foundation/common_voice_8_0
args: tt
metrics:
- name: Test WER
type: wer
value: 53.16
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xlsr-53-W2V2-TATAR-SMALL
This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4714
- Wer: 0.5316
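As a rough usage sketch (not part of the original card; it assumes a 16 kHz mono audio file and that ffmpeg is available for decoding):
```python
from transformers import pipeline

# hypothetical example; replace the path with your own Tatar audio clip
asr = pipeline(
    "automatic-speech-recognition",
    model="emre/wav2vec2-large-xlsr-53-W2V2-TATAR-SMALL",
)
print(asr("sample_tatar.wav")["text"])
```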
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 6.2446 | 1.17 | 400 | 3.2621 | 1.0 |
| 1.739 | 2.35 | 800 | 0.5832 | 0.7688 |
| 0.4718 | 3.52 | 1200 | 0.4785 | 0.6824 |
| 0.3574 | 4.69 | 1600 | 0.4814 | 0.6792 |
| 0.2946 | 5.86 | 2000 | 0.4484 | 0.6506 |
| 0.2674 | 7.04 | 2400 | 0.4612 | 0.6225 |
| 0.2349 | 8.21 | 2800 | 0.4600 | 0.6050 |
| 0.2206 | 9.38 | 3200 | 0.4772 | 0.6048 |
| 0.2072 | 10.56 | 3600 | 0.4676 | 0.6106 |
| 0.1984 | 11.73 | 4000 | 0.4816 | 0.6079 |
| 0.1793 | 12.9 | 4400 | 0.4616 | 0.5836 |
| 0.172 | 14.08 | 4800 | 0.4808 | 0.5860 |
| 0.1624 | 15.25 | 5200 | 0.4854 | 0.5820 |
| 0.156 | 16.42 | 5600 | 0.4609 | 0.5656 |
| 0.1448 | 17.59 | 6000 | 0.4926 | 0.5817 |
| 0.1406 | 18.77 | 6400 | 0.4638 | 0.5654 |
| 0.1337 | 19.94 | 6800 | 0.4731 | 0.5652 |
| 0.1317 | 21.11 | 7200 | 0.4861 | 0.5639 |
| 0.1179 | 22.29 | 7600 | 0.4766 | 0.5521 |
| 0.1197 | 23.46 | 8000 | 0.4824 | 0.5584 |
| 0.1096 | 24.63 | 8400 | 0.5006 | 0.5559 |
| 0.1038 | 25.81 | 8800 | 0.4994 | 0.5440 |
| 0.0992 | 26.98 | 9200 | 0.4867 | 0.5405 |
| 0.0984 | 28.15 | 9600 | 0.4798 | 0.5361 |
| 0.0943 | 29.33 | 10000 | 0.4714 | 0.5316 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.14.0
- Tokenizers 0.10.3
|
emre/wav2vec2-xls-r-300m-W2V2-XLSR-300M-YAKUT-SMALL | 71344b5d3a6da50eb2ff1cc1bafffbd4b7b663c8 | 2022-03-24T11:53:45.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"sah",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"robust-speech-event",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | emre | null | emre/wav2vec2-xls-r-300m-W2V2-XLSR-300M-YAKUT-SMALL | 4 | null | transformers | 18,575 | ---
license: apache-2.0
language: sah
tags:
- generated_from_trainer
- robust-speech-event
- hf-asr-leaderboard
datasets:
- common_voice
model-index:
- name: wav2vec2-xls-r-300m-W2V2-XLSR-300M-YAKUT-SMALL
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice sah
type: common_voice
args: sah
metrics:
- name: Test WER
type: wer
value: 79.0
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-300m-W2V2-XLSR-300M-YAKUT-SMALL
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9068
- Wer: 0.7900
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 4.6926 | 19.05 | 400 | 2.7538 | 1.0 |
| 0.7031 | 38.1 | 800 | 0.9068 | 0.7900 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.14.0
- Tokenizers 0.10.3
|
enelpi/electra-base-discriminator-finetuned_squadv2_tr | 5a76fc07c8cc68dc4070373d72ac3873e857700a | 2020-07-31T16:49:06.000Z | [
"pytorch",
"electra",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | enelpi | null | enelpi/electra-base-discriminator-finetuned_squadv2_tr | 4 | null | transformers | 18,576 | Entry not found |
enelpol/poleval2021-task2 | b4bfe2806164736b4ad7d9d6a5da13dd3ff130d1 | 2021-10-06T11:46:39.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | enelpol | null | enelpol/poleval2021-task2 | 4 | null | transformers | 18,577 | Entry not found |
ericRosello/bert-base-uncased-finetuned-squad-frozen-v1 | e4cbd2923eb918f88f7f9e89565ff42588753a56 | 2022-01-04T17:03:12.000Z | [
"pytorch",
"tensorboard",
"bert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | ericRosello | null | ericRosello/bert-base-uncased-finetuned-squad-frozen-v1 | 4 | null | transformers | 18,578 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: bert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-finetuned-squad
This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 4.0178
## Model description
Base model weights were frozen, leaving only the last layer (qa_outputs) to be fine-tuned.
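As an illustrative sketch of that setup (not the exact training script used here), the base encoder can be frozen so that only the QA head remains trainable:
```python
from transformers import AutoModelForQuestionAnswering

model = AutoModelForQuestionAnswering.from_pretrained("bert-base-uncased")

# freeze every parameter of the BERT embeddings and encoder
for param in model.bert.parameters():
    param.requires_grad = False

# only the qa_outputs head is left trainable
trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # ['qa_outputs.weight', 'qa_outputs.bias']
```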
## Training and evaluation data
Achieved EM: 8.013245033112582, F1: 15.9706088498649
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 4.3602 | 1.0 | 5533 | 4.3460 |
| 4.0995 | 2.0 | 11066 | 4.0787 |
| 4.0302 | 3.0 | 16599 | 4.0178 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
|
erica/krm_sa2 | f285266ecf70c7ece29b317d20eb585ea467b551 | 2021-11-23T09:23:13.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | erica | null | erica/krm_sa2 | 4 | null | transformers | 18,579 | Entry not found |
ethzanalytics/ai-msgbot-gpt2-L-dialogue | c05d173f1ef999d8cb95f56e4f1bfa0bfc5ce4bb | 2021-12-26T20:42:52.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | ethzanalytics | null | ethzanalytics/ai-msgbot-gpt2-L-dialogue | 4 | null | transformers | 18,580 | # ai-msgbot GPT2-L + daily dialogues
_NOTE: this model card is a WIP_
GPT2-L (774M parameters) fine-tuned on the Wizard of Wikipedia dataset for 40k steps with 34/36 layers frozen using `aitextgen`. This model was then subsequently further fine-tuned on the [Daily Dialogues](http://yanran.li/dailydialog) dataset for an additional 40k steps, this time with **35** of 36 layers frozen.
Designed for use with [ai-msgbot](https://github.com/pszemraj/ai-msgbot) to create an open-ended chatbot (of course, if other use cases arise, have at it).
## conversation data
The dataset was tokenized and fed to the model as a conversation between two speakers, whose names are below. This is relevant for writing prompts and filtering/extracting text from responses.
`script_speaker_name` = `person alpha`
`script_responder_name` = `person beta`
## examples
- the default inference API examples should work _okay_
- an ideal test would be explicitly adding `person beta` to the **end** of the prompt text. This forces the model to respond to the entered chat prompt instead of extending the prompt and then responding to that (which may cut off the response text due to the Inference API limits).
### Example prompt:
```
do you like to eat beans?
person beta:
```
### Resulting output
```
do you like to eat beans?
person beta:
no, i don't like
```
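A minimal generation sketch with `transformers` (assuming the checkpoint loads as a standard GPT-2 causal LM; the decoding parameters below are illustrative, not from this card):
```python
from transformers import pipeline

generator = pipeline("text-generation", model="ethzanalytics/ai-msgbot-gpt2-L-dialogue")

# append the responder name so the model answers rather than extends the prompt
prompt = "do you like to eat beans?\nperson beta:"
result = generator(prompt, max_new_tokens=40, do_sample=True, top_p=0.95, temperature=0.7)
print(result[0]["generated_text"])
```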
## citations
```
@inproceedings{dinan2019wizard,
author={Emily Dinan and Stephen Roller and Kurt Shuster and Angela Fan and Michael Auli and Jason Weston},
title={{W}izard of {W}ikipedia: Knowledge-powered Conversational Agents},
booktitle = {Proceedings of the International Conference on Learning Representations (ICLR)},
year={2019},
}
@inproceedings{li-etal-2017-dailydialog,
title = "{D}aily{D}ialog: A Manually Labelled Multi-turn Dialogue Dataset",
author = "Li, Yanran and
Su, Hui and
Shen, Xiaoyu and
Li, Wenjie and
Cao, Ziqiang and
Niu, Shuzi",
booktitle = "Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
month = nov,
year = "2017",
address = "Taipei, Taiwan",
publisher = "Asian Federation of Natural Language Processing",
url = "https://aclanthology.org/I17-1099",
pages = "986--995",
abstract = "We develop a high-quality multi-turn dialog dataset, \textbf{DailyDialog}, which is intriguing in several aspects. The language is human-written and less noisy. The dialogues in the dataset reflect our daily communication way and cover various topics about our daily life. We also manually label the developed dataset with communication intention and emotion information. Then, we evaluate existing approaches on DailyDialog dataset and hope it benefit the research field of dialog systems. The dataset is available on \url{http://yanran.li/dailydialog}",
}
``` |
eunjin/koMHBERT-kcbert-based-v1 | 3022b649e61663e4013b30dc00a82e1cda21cc31 | 2021-05-19T16:46:41.000Z | [
"pytorch",
"jax",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | eunjin | null | eunjin/koMHBERT-kcbert-based-v1 | 4 | null | transformers | 18,581 | korean Mental Health BERT
This is a BERT model obtained by MLM fine-tuning kcBERT on the dataset below. We judged this dataset to be helpful for addressing mental-health problems and used it for domain adaptation; the resulting model can be used for classifying mental-health-related emotions and states and for building chatbots on top of that.
We plan to further domain-adapt (DAPT) the model on a larger-scale dataset that will be released later.
datasets from AIhub
Wellness dialogue script datasets 1 & 2 (about 29,000 examples after deduplication)
@inproceedings{lee2020kcbert, title={KcBERT: Korean Comments BERT}, author={Lee, Junbum}, booktitle={Proceedings of the 32nd Annual Conference on Human and Cognitive Language Technology}, pages={437--440}, year={2020} } |
eunjin/koMHBERT-krbert-based-v1 | aec63523a265c378c86c57ef463fae0437c60434 | 2021-06-05T17:45:36.000Z | [
"pytorch",
"jax",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | eunjin | null | eunjin/koMHBERT-krbert-based-v1 | 4 | null | transformers | 18,582 | korean Mental Health BERT
This is a BERT model obtained by MLM fine-tuning the KR-Medium BERT released on Hugging Face on the dataset below. We judged this dataset to be helpful for addressing mental-health problems and used it for domain adaptation; the resulting model can be used for classifying mental-health-related emotions and states and for building chatbots on top of that. We plan to further domain-adapt (DAPT) the model on a larger-scale dataset that will be released later.
datasets from AIhub
Wellness dialogue script datasets 1 & 2 (about 29,000 examples after deduplication)
|
evandrodiniz/autonlp-api-boamente-417310788 | e93cd663824d21bef56eeab69392632339ed6c97 | 2021-12-14T18:38:02.000Z | [
"pytorch",
"bert",
"text-classification",
"unk",
"dataset:evandrodiniz/autonlp-data-api-boamente",
"transformers",
"autonlp",
"co2_eq_emissions"
] | text-classification | false | evandrodiniz | null | evandrodiniz/autonlp-api-boamente-417310788 | 4 | null | transformers | 18,583 | ---
tags: autonlp
language: unk
widget:
- text: "I love AutoNLP 🤗"
datasets:
- evandrodiniz/autonlp-data-api-boamente
co2_eq_emissions: 6.826886567147602
---
# Model Trained Using AutoNLP
- Problem type: Binary Classification
- Model ID: 417310788
- CO2 Emissions (in grams): 6.826886567147602
## Validation Metrics
- Loss: 0.20949310064315796
- Accuracy: 0.9578392621870883
- Precision: 0.9476190476190476
- Recall: 0.9045454545454545
- AUC: 0.9714032720526227
- F1: 0.9255813953488372
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/evandrodiniz/autonlp-api-boamente-417310788
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("evandrodiniz/autonlp-api-boamente-417310788", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("evandrodiniz/autonlp-api-boamente-417310788", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` |
evandrodiniz/autonlp-api-boamente-417310793 | 2e93ad9ab4b2fc417412399bf8c74fe9dc2d3b30 | 2021-12-14T18:39:10.000Z | [
"pytorch",
"bert",
"text-classification",
"unk",
"dataset:evandrodiniz/autonlp-data-api-boamente",
"transformers",
"autonlp",
"co2_eq_emissions"
] | text-classification | false | evandrodiniz | null | evandrodiniz/autonlp-api-boamente-417310793 | 4 | null | transformers | 18,584 | ---
tags: autonlp
language: unk
widget:
- text: "I love AutoNLP 🤗"
datasets:
- evandrodiniz/autonlp-data-api-boamente
co2_eq_emissions: 9.446754273734577
---
# Model Trained Using AutoNLP
- Problem type: Binary Classification
- Model ID: 417310793
- CO2 Emissions (in grams): 9.446754273734577
## Validation Metrics
- Loss: 0.25755178928375244
- Accuracy: 0.9407114624505929
- Precision: 0.8600823045267489
- Recall: 0.95
- AUC: 0.9732501264968797
- F1: 0.9028077753779697
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/evandrodiniz/autonlp-api-boamente-417310793
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("evandrodiniz/autonlp-api-boamente-417310793", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("evandrodiniz/autonlp-api-boamente-417310793", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` |
facebook/s2t-small-covost2-en-de-st | a61c96820ff12f2a916e2437c9342c8b8d321ed1 | 2022-02-07T15:15:09.000Z | [
"pytorch",
"tf",
"speech_to_text",
"automatic-speech-recognition",
"en",
"de",
"dataset:covost2",
"arxiv:2010.05171",
"arxiv:1912.06670",
"arxiv:1904.08779",
"transformers",
"audio",
"speech-translation",
"license:mit"
] | automatic-speech-recognition | false | facebook | null | facebook/s2t-small-covost2-en-de-st | 4 | null | transformers | 18,585 | ---
language:
- en
- de
datasets:
- covost2
tags:
- audio
- speech-translation
- automatic-speech-recognition
license: mit
pipeline_tag: automatic-speech-recognition
widget:
- example_title: Librispeech sample 1
src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
- example_title: Librispeech sample 2
src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
---
# S2T-SMALL-COVOST2-EN-DE-ST
`s2t-small-covost2-en-de-st` is a Speech to Text Transformer (S2T) model trained for end-to-end Speech Translation (ST).
The S2T model was proposed in [this paper](https://arxiv.org/abs/2010.05171) and released in
[this repository](https://github.com/pytorch/fairseq/tree/master/examples/speech_to_text)
## Model description
S2T is a transformer-based seq2seq (encoder-decoder) model designed for end-to-end Automatic Speech Recognition (ASR) and Speech
Translation (ST). It uses a convolutional downsampler to reduce the length of speech inputs by 3/4th before they are
fed into the encoder. The model is trained with standard autoregressive cross-entropy loss and generates the
transcripts/translations autoregressively.
## Intended uses & limitations
This model can be used for end-to-end English speech to German text translation.
See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look for other S2T checkpoints.
### How to use
As this is a standard sequence-to-sequence transformer model, you can use the `generate` method to generate the
transcripts by passing the speech features to the model.
*Note: The `Speech2TextProcessor` object uses [torchaudio](https://github.com/pytorch/audio) to extract the
filter bank features. Make sure to install the `torchaudio` package before running this example.*
You could either install those as extra speech dependencies with
`pip install transformers"[speech, sentencepiece]"` or install the packages separately
with `pip install torchaudio sentencepiece`.
```python
import torch
from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
from datasets import load_dataset
import soundfile as sf
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-covost2-en-de-st")
processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-covost2-en-de-st")
def map_to_array(batch):
speech, _ = sf.read(batch["file"])
batch["speech"] = speech
return batch
ds = load_dataset(
"patrickvonplaten/librispeech_asr_dummy",
"clean",
split="validation"
)
ds = ds.map(map_to_array)
inputs = processor(
ds["speech"][0],
sampling_rate=48_000,
return_tensors="pt"
)
generated_ids = model.generate(input_ids=inputs["input_features"], attention_mask=inputs["attention_mask"])
translation = processor.batch_decode(generated_ids, skip_special_tokens=True)
```
## Training data
The s2t-small-covost2-en-de-st is trained on the English-German subset of [CoVoST2](https://github.com/facebookresearch/covost).
CoVoST is a large-scale multilingual ST corpus based on [Common Voice](https://arxiv.org/abs/1912.06670), created to foster
ST research with the largest ever open dataset.
## Training procedure
### Preprocessing
The speech data is pre-processed by extracting Kaldi-compliant 80-channel log mel-filter bank features automatically from
WAV/FLAC audio files via PyKaldi or torchaudio. Further utterance-level CMVN (cepstral mean and variance normalization)
is applied to each example.
The texts are lowercased and tokenized using character based SentencePiece vocab.
### Training
The model is trained with standard autoregressive cross-entropy loss and using [SpecAugment](https://arxiv.org/abs/1904.08779).
The encoder receives speech features, and the decoder generates the transcripts autoregressively. To accelerate
model training and for better performance the encoder is pre-trained for English ASR.
## Evaluation results
CoVoST2 test results for en-de (BLEU score): 16.29
### BibTeX entry and citation info
```bibtex
@inproceedings{wang2020fairseqs2t,
title = {fairseq S2T: Fast Speech-to-Text Modeling with fairseq},
author = {Changhan Wang and Yun Tang and Xutai Ma and Anne Wu and Dmytro Okhonko and Juan Pino},
booktitle = {Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations},
year = {2020},
}
```
|
facebook/s2t-small-covost2-en-et-st | c24d81b07cda06d0750fa31356becb3dd33bd32c | 2022-02-07T15:31:40.000Z | [
"pytorch",
"tf",
"speech_to_text",
"automatic-speech-recognition",
"en",
"et",
"dataset:covost2",
"arxiv:2010.05171",
"arxiv:1912.06670",
"arxiv:1904.08779",
"transformers",
"audio",
"speech-translation",
"license:mit"
] | automatic-speech-recognition | false | facebook | null | facebook/s2t-small-covost2-en-et-st | 4 | null | transformers | 18,586 | ---
language:
- en
- et
datasets:
- covost2
tags:
- audio
- speech-translation
- automatic-speech-recognition
license: mit
pipeline_tag: automatic-speech-recognition
widget:
- example_title: Librispeech sample 1
src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
- example_title: Librispeech sample 2
src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
---
# S2T-SMALL-COVOST2-EN-ET-ST
`s2t-small-covost2-en-et-st` is a Speech to Text Transformer (S2T) model trained for end-to-end Speech Translation (ST).
The S2T model was proposed in [this paper](https://arxiv.org/abs/2010.05171) and released in
[this repository](https://github.com/pytorch/fairseq/tree/master/examples/speech_to_text)
## Model description
S2T is a transformer-based seq2seq (encoder-decoder) model designed for end-to-end Automatic Speech Recognition (ASR) and Speech
Translation (ST). It uses a convolutional downsampler to reduce the length of speech inputs by 3/4th before they are
fed into the encoder. The model is trained with standard autoregressive cross-entropy loss and generates the
transcripts/translations autoregressively.
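For intuition only, a minimal sketch of such a convolutional subsampler (this illustrates the idea, not the exact module used in this checkpoint): two strided 1-D convolutions, each halving the time dimension, so a T-frame input comes out roughly T/4 frames long.
```python
import torch
import torch.nn as nn
class ConvSubsampler(nn.Module):
    """Illustrative two-layer strided Conv1d subsampler (~4x reduction in time)."""
    def __init__(self, in_dim=80, hidden_dim=1024, out_dim=256):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv1d(in_dim, hidden_dim, kernel_size=3, stride=2, padding=1),
            nn.GLU(dim=1),
            nn.Conv1d(hidden_dim // 2, hidden_dim, kernel_size=3, stride=2, padding=1),
            nn.GLU(dim=1),
        )
        self.proj = nn.Linear(hidden_dim // 2, out_dim)
    def forward(self, features):  # features: (batch, time, in_dim)
        x = self.conv(features.transpose(1, 2))  # (batch, hidden_dim // 2, ~time / 4)
        return self.proj(x.transpose(1, 2))      # (batch, ~time / 4, out_dim)
x = torch.randn(1, 100, 80)  # 100 frames of 80-dim filter bank features
print(ConvSubsampler()(x).shape)  # torch.Size([1, 25, 256])
```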
## Intended uses & limitations
This model can be used for end-to-end English speech to Estonian text translation.
See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look for other S2T checkpoints.
### How to use
As this is a standard sequence-to-sequence transformer model, you can use the `generate` method to generate the
transcripts by passing the speech features to the model.
*Note: The `Speech2TextProcessor` object uses [torchaudio](https://github.com/pytorch/audio) to extract the
filter bank features. Make sure to install the `torchaudio` package before running this example.*
You can either install the extra speech dependencies with
`pip install "transformers[speech, sentencepiece]"` or install the packages separately
with `pip install torchaudio sentencepiece`.
```python
import torch
from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
from datasets import load_dataset
import soundfile as sf
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-covost2-en-et-st")
processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-covost2-en-et-st")
def map_to_array(batch):
speech, _ = sf.read(batch["file"])
batch["speech"] = speech
return batch
ds = load_dataset(
"patrickvonplaten/librispeech_asr_dummy",
"clean",
split="validation"
)
ds = ds.map(map_to_array)
# The LibriSpeech sample loaded above is 16 kHz audio, which matches the model's expected input rate.
inputs = processor(
    ds["speech"][0],
    sampling_rate=16_000,
    return_tensors="pt"
)
generated_ids = model.generate(inputs["input_features"], attention_mask=inputs["attention_mask"])
translation = processor.batch_decode(generated_ids, skip_special_tokens=True)
```
## Training data
`s2t-small-covost2-en-et-st` is trained on the English-Estonian subset of [CoVoST2](https://github.com/facebookresearch/covost).
CoVoST is a large-scale multilingual ST corpus based on [Common Voice](https://arxiv.org/abs/1912.06670), created to foster
ST research with the largest ever open dataset.
## Training procedure
### Preprocessing
The speech data is pre-processed by extracting Kaldi-compliant 80-channel log mel-filter bank features automatically from
WAV/FLAC audio files via PyKaldi or torchaudio. Further utterance-level CMVN (cepstral mean and variance normalization)
is applied to each example.
The texts are lowercased and tokenized using a character-based SentencePiece vocabulary.
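A rough sketch of building such a character-level SentencePiece model with the `sentencepiece` library; the file name, vocabulary size, and training text are placeholders, and the vocabulary shipped with this checkpoint was built by the fairseq recipe:
```python
import sentencepiece as spm
# Assumption: train_text.txt contains one lowercased target-language sentence per line.
spm.SentencePieceTrainer.train(
    input="train_text.txt",
    model_prefix="spm_char",
    model_type="char",
    vocab_size=300,          # placeholder; character vocabularies are typically small
    hard_vocab_limit=False,  # allow the vocab to shrink if there are fewer distinct characters
)
sp = spm.SentencePieceProcessor(model_file="spm_char.model")
print(sp.encode("see on lihtne näide", out_type=str))
```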
### Training
The model is trained with a standard autoregressive cross-entropy loss, with [SpecAugment](https://arxiv.org/abs/1904.08779) applied for data augmentation.
The encoder receives speech features, and the decoder generates the transcripts autoregressively. To accelerate
model training and improve performance, the encoder is pre-trained on English ASR.
## Evaluation results
CoVoST2 test results for en-et (BLEU score): 13.01
### BibTeX entry and citation info
```bibtex
@inproceedings{wang2020fairseqs2t,
title = {fairseq S2T: Fast Speech-to-Text Modeling with fairseq},
author = {Changhan Wang and Yun Tang and Xutai Ma and Anne Wu and Dmytro Okhonko and Juan Pino},
booktitle = {Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations},
year = {2020},
}
```
|
facebook/wav2vec2-base-10k-voxpopuli-ft-es | 1f8c1fd6048a71c6c7644224c5a0aa87fb92cc27 | 2021-07-06T01:49:29.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"es",
"arxiv:2101.00390",
"transformers",
"audio",
"voxpopuli",
"license:cc-by-nc-4.0"
] | automatic-speech-recognition | false | facebook | null | facebook/wav2vec2-base-10k-voxpopuli-ft-es | 4 | null | transformers | 18,587 | ---
language: es
tags:
- audio
- automatic-speech-recognition
- voxpopuli
license: cc-by-nc-4.0
---
# Wav2Vec2-Base-VoxPopuli-Finetuned
[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) base model pretrained on the 10K-hour unlabeled subset of the [VoxPopuli corpus](https://arxiv.org/abs/2101.00390) and fine-tuned on the transcribed Spanish (es) data (refer to Table 1 of the paper for more information).
**Paper**: *[VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation
Learning, Semi-Supervised Learning and Interpretation](https://arxiv.org/abs/2101.00390)*
**Authors**: *Changhan Wang, Morgane Riviere, Ann Lee, Anne Wu, Chaitanya Talnikar, Daniel Haziza, Mary Williamson, Juan Pino, Emmanuel Dupoux* from *Facebook AI*
See the official [VoxPopuli repository](https://github.com/facebookresearch/voxpopuli/) for more information.
# Usage for inference
The following shows how the model can be used for inference on a sample of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets).
```python
#!/usr/bin/env python3
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from datasets import load_dataset
import torchaudio
import torch
# load model & processor
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-es")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-es")
# load dataset
ds = load_dataset("common_voice", "es", split="validation[:1%]")
# common voice does not match target sampling rate
common_voice_sample_rate = 48000
target_sample_rate = 16000
resampler = torchaudio.transforms.Resample(common_voice_sample_rate, target_sample_rate)
# define mapping fn to read in sound file and resample
def map_to_array(batch):
speech, _ = torchaudio.load(batch["path"])
speech = resampler(speech)
batch["speech"] = speech[0]
return batch
# load all audio files
ds = ds.map(map_to_array)
# run inference on the first 5 data samples
inputs = processor(ds[:5]["speech"], sampling_rate=target_sample_rate, return_tensors="pt", padding=True)
# inference: greedily pick the most likely token at each frame (no gradients needed)
with torch.no_grad():
    logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))
```
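If you also want a rough quality estimate on this Common Voice slice, you could compare the decoded text with the reference transcripts, for example with the `jiwer` package. This is a sketch only; it assumes the references are in the `sentence` column and skips the casing and punctuation normalization a proper evaluation would apply:
```python
from jiwer import wer
# continues from the snippet above: `processor`, `predicted_ids`, and `ds` are already defined
predictions = processor.batch_decode(predicted_ids)
references = ds[:5]["sentence"]
print("WER:", wer(references, predictions))
```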
|
fadhilarkan/distilbert-base-uncased-finetuned-cola-3 | 83c372f77b17051ee4f502140387c34a1a604a05 | 2021-11-12T18:12:25.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | fadhilarkan | null | fadhilarkan/distilbert-base-uncased-finetuned-cola-3 | 4 | null | transformers | 18,588 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- matthews_correlation
model-index:
- name: distilbert-base-uncased-finetuned-cola-3
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-cola-3
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0002
- Matthews Correlation: 1.0
The model predicts the following labels:
- Label 0: "AIMX"
- Label 1: "OWNX"
- Label 2: "CONT"
- Label 3: "BASE"
- Label 4: "MISC"
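A minimal inference sketch that maps the predicted class index to the label names listed above. The mapping is taken from this card and the example sentence is made up; the checkpoint's own `id2label` config may or may not carry these names:
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_id = "fadhilarkan/distilbert-base-uncased-finetuned-cola-3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
labels = ["AIMX", "OWNX", "CONT", "BASE", "MISC"]  # taken from the list above
inputs = tokenizer("we propose a new method for extracting key sentences", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(labels[logits.argmax(dim=-1).item()])
```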
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| No log | 1.0 | 192 | 0.0060 | 1.0 |
| No log | 2.0 | 384 | 0.0019 | 1.0 |
| 0.0826 | 3.0 | 576 | 0.0010 | 1.0 |
| 0.0826 | 4.0 | 768 | 0.0006 | 1.0 |
| 0.0826 | 5.0 | 960 | 0.0005 | 1.0 |
| 0.001 | 6.0 | 1152 | 0.0004 | 1.0 |
| 0.001 | 7.0 | 1344 | 0.0003 | 1.0 |
| 0.0005 | 8.0 | 1536 | 0.0003 | 1.0 |
| 0.0005 | 9.0 | 1728 | 0.0002 | 1.0 |
| 0.0005 | 10.0 | 1920 | 0.0002 | 1.0 |
### Framework versions
- Transformers 4.12.3
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
|
federicopascual/distilbert-base-uncased-finetuned-cola | bb6c34815b83e521276e6e9861374cb24393462d | 2021-12-24T21:52:47.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | federicopascual | null | federicopascual/distilbert-base-uncased-finetuned-cola | 4 | null | transformers | 18,589 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert-base-uncased-finetuned-cola
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.5370037450559281
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-cola
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7480
- Matthews Correlation: 0.5370
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
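For reference, a hedged sketch of how these hyperparameters map onto `TrainingArguments`; the card was generated by the Trainer, so the exact training script is not reproduced here, and the per-epoch evaluation strategy is inferred from the results table below:
```python
from transformers import TrainingArguments
# Sketch only: mirrors the hyperparameters listed above.
training_args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-cola",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=5,
    seed=42,
    evaluation_strategy="epoch",  # assumption, based on the per-epoch validation results
)
```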
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5292 | 1.0 | 535 | 0.5110 | 0.4239 |
| 0.3508 | 2.0 | 1070 | 0.4897 | 0.4993 |
| 0.2346 | 3.0 | 1605 | 0.6275 | 0.5029 |
| 0.1806 | 4.0 | 2140 | 0.7480 | 0.5370 |
| 0.1291 | 5.0 | 2675 | 0.8841 | 0.5200 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
|
fgaim/tiroberta-sentiment | d1cf1d2004813897d23ebd6c1c06d859dd8a0928 | 2022-05-14T06:47:23.000Z | [
"pytorch",
"roberta",
"text-classification",
"ti",
"dataset:TLMD",
"transformers",
"model-index"
] | text-classification | false | fgaim | null | fgaim/tiroberta-sentiment | 4 | 1 | transformers | 18,590 | ---
language: ti
widget:
- text: "ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር"
datasets:
- TLMD
metrics:
- accuracy
- f1
- precision
- recall
model-index:
- name: tiroberta-sentiment
results:
- task:
name: Text Classification
type: text-classification
metrics:
- name: Accuracy
type: accuracy
value: 0.828
- name: F1
type: f1
value: 0.8476527900797165
- name: Precision
type: precision
value: 0.760731319554849
- name: Recall
type: recall
value: 0.957
---
# Sentiment Analysis for Tigrinya with TiRoBERTa
This model is a fine-tuned version of [TiRoBERTa](https://huggingface.co/fgaim/roberta-base-tigrinya) on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020).
## Basic usage
```python
from transformers import pipeline
ti_sent = pipeline("sentiment-analysis", model="fgaim/tiroberta-sentiment")
ti_sent("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር")
```
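The pipeline returns a list with one prediction per input (a dict with a label and a score). If you prefer working with the model and tokenizer directly, a roughly equivalent sketch:
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
tokenizer = AutoTokenizer.from_pretrained("fgaim/tiroberta-sentiment")
model = AutoModelForSequenceClassification.from_pretrained("fgaim/tiroberta-sentiment")
inputs = tokenizer("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር", return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)  # class probabilities
print(probs)
```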
## Training
### Hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Results
It achieves the following results on the evaluation set:
- F1: 0.8477
- Precision: 0.7607
- Recall: 0.957
- Accuracy: 0.828
- Loss: 0.6796
### Framework versions
- Transformers 4.10.3
- Pytorch 1.9.0+cu111
- Datasets 1.10.2
- Tokenizers 0.10.1
## Citation
If you use this model in your product or research, please cite as follows:
```
@article{Fitsum2021TiPLMs,
author={Fitsum Gaim and Wonsuk Yang and Jong C. Park},
title={Monolingual Pre-trained Language Models for Tigrinya},
year=2021,
publisher={WiNLP 2021/EMNLP 2021}
}
```
## References
```
Tela, A., Woubie, A. and Hautamäki, V. 2020.
Transferring Monolingual Model to Low-Resource Language: The Case of Tigrinya.
ArXiv, abs/2006.07698.
```
|
finiteautomata/bert-non-contextualized-hate-speech-es | d8930308341efd48d0f3c5af298549d49a3436e1 | 2021-05-19T16:52:40.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
] | text-classification | false | finiteautomata | null | finiteautomata/bert-non-contextualized-hate-speech-es | 4 | null | transformers | 18,591 | Entry not found |
flax-community/wav2vec2-base-persian | b2f4c8b02e6b71173ce1b72dc4841fb359e1bce3 | 2021-07-18T05:44:28.000Z | [
"pytorch",
"jax",
"tensorboard",
"wav2vec2",
"pretraining",
"fa",
"dataset:common_voice",
"transformers",
"speech",
"license:apache-2.0"
] | null | false | flax-community | null | flax-community/wav2vec2-base-persian | 4 | 1 | transformers | 18,592 | ---
language: fa
datasets:
- common_voice
tags:
- speech
license: apache-2.0
---
# Wav2Vec2 4 Persian
> This is part of the
[Flax/Jax Community Week](https://discuss.huggingface.co/t/pretrain-wav2vec2-in-persian/8180), organized by [HuggingFace](https://huggingface.co/) and TPU usage sponsored by Google.
## Team Members
- Mehrdad Farahani ([m3hrdadfi](https://huggingface.co/m3hrdadfi))
## Dataset TODO: Update
## How To Use TODO: Update
## Demo TODO: Update
## Evaluation TODO: Update |
flboehm/reddit-bert-text_5 | c12f71bb32f95c144e96c5989402c80bce762161 | 2021-12-18T12:05:58.000Z | [
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | fill-mask | false | flboehm | null | flboehm/reddit-bert-text_5 | 4 | null | transformers | 18,593 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: reddit-bert-text5
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# reddit-bert-text5
This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.5749
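Since this is a masked language model fine-tuned from `bert-base-uncased`, it can be queried with the fill-mask pipeline; a minimal sketch (the example sentence is made up):
```python
from transformers import pipeline
fill_mask = pipeline("fill-mask", model="flboehm/reddit-bert-text_5")
print(fill_mask("Reddit is my favorite [MASK] to waste time on."))
```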
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.0257 | 1.0 | 945 | 2.6167 |
| 2.7138 | 2.0 | 1890 | 2.5529 |
| 2.6363 | 3.0 | 2835 | 2.5463 |
### Framework versions
- Transformers 4.14.1
- Pytorch 1.10.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
|
formermagic/codet5-base | 7b85f801367ff55c09bcccdb4a899c93f1c693b7 | 2021-09-19T13:30:39.000Z | [
"pytorch",
"jax",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | formermagic | null | formermagic/codet5-base | 4 | 1 | transformers | 18,594 | Entry not found |
fspanda/Electra-Medical-v1.5-discriminator | 181be3758977eb479dad6d15c0ec7ccfa52cae2d | 2020-11-04T15:00:32.000Z | [
"pytorch",
"electra",
"pretraining",
"transformers"
] | null | false | fspanda | null | fspanda/Electra-Medical-v1.5-discriminator | 4 | null | transformers | 18,595 | Entry not found |
g8a9/vit-geppetto-captioning | 7f05a9fc08fdfc88d676fb17232fdd5a98b608ec | 2021-11-29T09:57:21.000Z | [
"pytorch",
"vision-encoder-decoder",
"transformers"
] | null | false | g8a9 | null | g8a9/vit-geppetto-captioning | 4 | null | transformers | 18,596 | Entry not found |
gaotianyu1350/sup-simcse-bert-base-uncased | f938d5252193f7284296c621aff89b52ab7e7015 | 2021-05-19T17:03:12.000Z | [
"pytorch",
"jax",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | gaotianyu1350 | null | gaotianyu1350/sup-simcse-bert-base-uncased | 4 | null | transformers | 18,597 | Entry not found |
gaotianyu1350/unsup-simcse-bert-base-uncased | e92bd5ed977474cc6743a41f5bad3d96227a1efe | 2021-05-19T17:07:56.000Z | [
"pytorch",
"jax",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | gaotianyu1350 | null | gaotianyu1350/unsup-simcse-bert-base-uncased | 4 | null | transformers | 18,598 | Entry not found |
garynguyen1174/disaster_tweet_bert | cc16b0b8664f36a0d5a1de8715640d84cc30e841 | 2021-06-06T01:05:17.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | garynguyen1174 | null | garynguyen1174/disaster_tweet_bert | 4 | null | transformers | 18,599 | Entry not found |