Dataset schema:

| Column | Type | Range / classes |
|---|---|---|
| `modelId` | string | length 4–112 |
| `sha` | string | length 40 |
| `lastModified` | string | length 24 |
| `tags` | sequence | |
| `pipeline_tag` | string | 29 classes |
| `private` | bool | 1 class |
| `author` | string | length 2–38 |
| `config` | null | |
| `id` | string | length 4–112 |
| `downloads` | float64 | 0–36.8M |
| `likes` | float64 | 0–712 |
| `library_name` | string | 17 classes |
| `__index_level_0__` | int64 | 0–38.5k |
| `readme` | string | length 0–186k |
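A minimal sketch of loading and filtering a dump with this schema using the `datasets` library; the file name is hypothetical, since this excerpt does not say where the underlying data lives:

```python
from datasets import load_dataset

# Hypothetical local file; the excerpt does not name the dump's source.
rows = load_dataset("json", data_files="models_dump.jsonl", split="train")

# Columns follow the schema table above: modelId, sha, lastModified, tags,
# pipeline_tag, private, author, config, id, downloads, likes,
# library_name, __index_level_0__, readme.
pos_models = rows.filter(lambda r: r["pipeline_tag"] == "token-classification")
print(len(pos_models), pos_models[0]["modelId"])
```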
ytlin/21qspw2p
0744326231ee575439945bfa339de78d3c04da19
2021-05-23T13:49:48.000Z
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
false
ytlin
null
ytlin/21qspw2p
2
null
transformers
24,900
Entry not found
ytlin/CDial-GPT2_LCCC-base
4b45b17756a170df56191c032f7713ad20ae7be7
2020-10-05T14:39:38.000Z
[ "pytorch", "transformers" ]
null
false
ytlin
null
ytlin/CDial-GPT2_LCCC-base
2
null
transformers
24,901
Entry not found
yucahu/len1
e90a1f3a52798076c857fcb93f4b9ff98cb223b5
2021-05-23T13:54:23.000Z
[ "pytorch", "tf", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
false
yucahu
null
yucahu/len1
2
null
transformers
24,902
Entry not found
yxchar/tlm-hyp-small-scale
b616c8247f6c22aa190e2e1c9de8b24ca7755b8e
2021-11-04T15:23:52.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
yxchar
null
yxchar/tlm-hyp-small-scale
2
null
transformers
24,903
Entry not found
yxchar/tlm-sciie-medium-scale
91bfdce9c0d0c011b05a4bb1cd26cc118e296468
2021-11-04T17:34:18.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
yxchar
null
yxchar/tlm-sciie-medium-scale
2
null
transformers
24,904
Entry not found
z3c1f4/distilbert-base-uncased-finetuned-cola
b7d5d292f6a6ed8b888d398c4b8db26df718d6af
2022-02-22T07:48:31.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
z3c1f4
null
z3c1f4/distilbert-base-uncased-finetuned-cola
2
null
transformers
24,905
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert-base-uncased-finetuned-cola
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: glue
      type: glue
      args: cola
    metrics:
    - name: Matthews Correlation
      type: matthews_correlation
      value: 0.5320879841803337
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7400
- Matthews Correlation: 0.5321

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5298 | 1.0 | 535 | 0.5168 | 0.4092 |
| 0.349 | 2.0 | 1070 | 0.4993 | 0.5099 |
| 0.2345 | 3.0 | 1605 | 0.6194 | 0.5046 |
| 0.1731 | 4.0 | 2140 | 0.7400 | 0.5321 |
| 0.1282 | 5.0 | 2675 | 0.8724 | 0.5078 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
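The hyperparameters above map directly onto `TrainingArguments`; a minimal sketch of an equivalent fine-tuning run (the actual training script is not part of the card, so the data loading and tokenization shown here are assumptions):

```python
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2)

# Assumed data pipeline: GLUE CoLA as in the card's front matter.
cola = load_dataset("glue", "cola")
encoded = cola.map(lambda ex: tokenizer(ex["sentence"], truncation=True), batched=True)

# Hyperparameters as reported: lr 2e-05, batch size 16, 5 epochs, seed 42.
# Adam betas/epsilon and the linear schedule are already the Trainer defaults.
args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-cola",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=5,
    seed=42,
)
trainer = Trainer(model=model, args=args, train_dataset=encoded["train"],
                  eval_dataset=encoded["validation"], tokenizer=tokenizer)
trainer.train()
```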
zaydzuhri/lelouch-medium
7597d56f4a989d78dab2ebfd15407af3ef60d9e9
2021-06-21T12:03:04.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
false
zaydzuhri
null
zaydzuhri/lelouch-medium
2
null
transformers
24,906
---
tags:
- conversational
---

# My Awesome Model
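The card itself documents nothing beyond the `conversational` tag; a minimal generation sketch, assuming the usual DialoGPT-style single-turn setup that the gpt2 + conversational tags suggest:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("zaydzuhri/lelouch-medium")
model = AutoModelForCausalLM.from_pretrained("zaydzuhri/lelouch-medium")

# One conversational turn: encode the user utterance plus EOS, then sample a reply.
inputs = tokenizer("Hello, who are you?" + tokenizer.eos_token, return_tensors="pt")
reply_ids = model.generate(**inputs, max_length=100,
                           pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(reply_ids[0][inputs["input_ids"].shape[-1]:],
                       skip_special_tokens=True))
```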
zeus0007/test
61d6661113845a40b3c216529a86424a156b887b
2021-09-30T06:20:38.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
zeus0007
null
zeus0007/test
2
null
transformers
24,907
Entry not found
zgotter/bert-base-finetuned-ynat
ef7436caa4323e8382054d9c046da05597bc6782
2021-09-24T02:00:26.000Z
[ "pytorch", "bert", "text-classification", "dataset:klue", "transformers", "generated_from_trainer", "model-index" ]
text-classification
false
zgotter
null
zgotter/bert-base-finetuned-ynat
2
null
transformers
24,908
---
tags:
- generated_from_trainer
datasets:
- klue
metrics:
- f1
model-index:
- name: bert-base-finetuned-ynat
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: klue
      type: klue
      args: ynat
    metrics:
    - name: F1
      type: f1
      value: 0.8669116640755216
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-base-finetuned-ynat

This model is a fine-tuned version of [klue/bert-base](https://huggingface.co/klue/bert-base) on the klue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3710
- F1: 0.8669

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| No log | 1.0 | 179 | 0.4223 | 0.8549 |
| No log | 2.0 | 358 | 0.3710 | 0.8669 |
| 0.2576 | 3.0 | 537 | 0.3891 | 0.8631 |
| 0.2576 | 4.0 | 716 | 0.3968 | 0.8612 |
| 0.2576 | 5.0 | 895 | 0.4044 | 0.8617 |

### Framework versions

- Transformers 4.10.3
- Pytorch 1.9.0+cu102
- Datasets 1.12.1
- Tokenizers 0.10.3
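A minimal inference sketch for this card (not part of the original readme); it assumes the standard `text-classification` pipeline works for this KLUE-YNAT topic classifier, and the Korean headline is an illustrative example, not from the card:

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="zgotter/bert-base-finetuned-ynat")

# Returns the predicted topic label and its score for a news headline.
print(classifier("유튜브 내달 2일까지 크리에이터 지원 공간 운영"))
```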
zgotter/bert_two_sent_classifier
50f2eb6d0a062e08e711f01c6d3acb3e2d06c6d4
2021-09-29T02:13:03.000Z
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
false
zgotter
null
zgotter/bert_two_sent_classifier
2
null
transformers
24,909
Entry not found
zhangle/distilbert-base-uncased-finetuned-cola
d0fabc618adcb43357509ec1db923191d4216d95
2022-02-20T10:44:31.000Z
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
false
zhangle
null
zhangle/distilbert-base-uncased-finetuned-cola
2
null
transformers
24,910
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert-base-uncased-finetuned-cola
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: glue
      type: glue
      args: cola
    metrics:
    - name: Matthews Correlation
      type: matthews_correlation
      value: 0.55727640631709
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8374
- Matthews Correlation: 0.5573

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5246 | 1.0 | 535 | 0.5219 | 0.4442 |
| 0.3506 | 2.0 | 1070 | 0.5133 | 0.5127 |
| 0.2395 | 3.0 | 1605 | 0.6590 | 0.5291 |
| 0.17 | 4.0 | 2140 | 0.7683 | 0.5456 |
| 0.1297 | 5.0 | 2675 | 0.8374 | 0.5573 |

### Framework versions

- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
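Both CoLA cards in this batch report Matthews correlation; as a quick reference for the metric, a self-contained sketch with scikit-learn's `matthews_corrcoef` on toy labels:

```python
from sklearn.metrics import matthews_corrcoef

# Toy labels/predictions; MCC ranges from -1 to 1, with 0 at chance level.
y_true = [1, 1, 0, 1, 0, 0, 1, 0]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]
print(matthews_corrcoef(y_true, y_pred))  # 0.5 for this toy example
```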
zharry29/goal_benchmark_gpt
151b19395816bfb3f95081c9bacd57d19dd91528
2021-05-23T14:08:46.000Z
[ "pytorch", "gpt2", "transformers" ]
null
false
zharry29
null
zharry29/goal_benchmark_gpt
2
null
transformers
24,911
Entry not found
zharry29/goal_benchmark_xlnet
d307a3f33dcec40b56b1ff6c2c64106a708f374a
2020-09-16T20:02:36.000Z
[ "pytorch", "xlnet", "multiple-choice", "transformers" ]
multiple-choice
false
zharry29
null
zharry29/goal_benchmark_xlnet
2
null
transformers
24,912
Entry not found
zharry29/intent_enwh_xlmr
dd9a6a061e7a17f74c2331232d96bed096252578
2020-09-16T20:11:13.000Z
[ "pytorch", "xlm-roberta", "multiple-choice", "transformers" ]
multiple-choice
false
zharry29
null
zharry29/intent_enwh_xlmr
2
null
transformers
24,913
Entry not found
zharry29/intent_fb-es_enwh_id
e4fd164fded8e0f40b1c8d216bc37167052b79d9
2020-09-16T20:13:57.000Z
[ "pytorch", "xlm-roberta", "multiple-choice", "transformers" ]
multiple-choice
false
zharry29
null
zharry29/intent_fb-es_enwh_id
2
null
transformers
24,914
Entry not found
zharry29/intent_sgd_id
a01e5b6961c340be9949974609252be9a47057fa
2021-05-20T23:36:23.000Z
[ "pytorch", "jax", "roberta", "multiple-choice", "transformers" ]
multiple-choice
false
zharry29
null
zharry29/intent_sgd_id
2
null
transformers
24,915
Entry not found
zhuqing/RoBERTa-large-uncased-exp2-feminist
b64b9acd9cda1f5dbf66b1e17339e7c5ab56f62e
2021-08-28T15:20:38.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/RoBERTa-large-uncased-exp2-feminist
2
null
transformers
24,916
Entry not found
zhuqing/bert-base-uncased-exp2-parent
6151c9bcf22cc384465e6475f360f4a0d83e0a58
2021-08-28T17:45:02.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/bert-base-uncased-exp2-parent
2
null
transformers
24,917
Entry not found
zhuqing/bert-large-whole-uncased-exp2-parent
4608cf3fd24ecf23e0326f3a98ca599cbecfe02f
2021-08-29T08:12:05.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/bert-large-whole-uncased-exp2-parent
2
null
transformers
24,918
Entry not found
zhuqing/bert-large-whole-uncased-exp3-feminist-nointersection
088dfed195355f43bc1f05584dc382243482858d
2021-08-29T10:51:18.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/bert-large-whole-uncased-exp3-feminist-nointersection
2
null
transformers
24,919
Entry not found
zhuqing/comparison-bert-base-uncased-netmums-parent
731284db61b9d1f1e6e747c5c4490de66b04e816
2021-08-19T18:40:26.000Z
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/comparison-bert-base-uncased-netmums-parent
2
null
transformers
24,920
Entry not found
zhuqing/roberta-base-uncased-exp2-parent
3dad64fa18b61c9cfc14684eafaa89b7b9078323
2021-08-28T18:31:52.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/roberta-base-uncased-exp2-parent
2
null
transformers
24,921
Entry not found
zhuqing/roberta-large-uncased-exp3-feminist
9b9915aaab1619c4e996ce3f913d098d867cccc9
2021-08-29T05:33:13.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/roberta-large-uncased-exp3-feminist
2
null
transformers
24,922
Entry not found
zhuqing/roberta-large-uncased-exp3-parent
c062c4fd8729e514c578d428d13f4f1975c6f322
2021-08-28T21:24:10.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/roberta-large-uncased-exp3-parent
2
null
transformers
24,923
Entry not found
zhuqing/v1-theme1
fa9bd56224b860245e6c38b06a661712c7430859
2021-07-07T15:53:20.000Z
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
false
zhuqing
null
zhuqing/v1-theme1
2
null
transformers
24,924
Entry not found
zloelias/rubert-tiny2-kinopoisk-reviews-finetuned-clf
e357c5cd975c81e9005fafc5ddfa74dc8d8bab9a
2021-12-06T19:40:03.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
zloelias
null
zloelias/rubert-tiny2-kinopoisk-reviews-finetuned-clf
2
null
transformers
24,925
Entry not found
zloelias/rubert-tiny2-lenta-ru-finetuned-clf
0cd4bc401c7ec9f105f84a8dd2673bfc76a6023b
2021-11-30T23:21:55.000Z
[ "pytorch", "tensorboard", "bert", "text-classification", "transformers" ]
text-classification
false
zloelias
null
zloelias/rubert-tiny2-lenta-ru-finetuned-clf
2
null
transformers
24,926
Entry not found
wietsedv/xlm-roberta-base-ft-udpos28-fi
68e88f7febdd46a12b0dad95d16ef14d768d3c7d
2022-02-25T09:58:27.000Z
[ "pytorch", "xlm-roberta", "token-classification", "fi", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-fi
2
null
transformers
24,927
---
language:
- fi
license: apache-2.0
library_name: transformers
tags:
- part-of-speech
- token-classification
datasets:
- universal_dependencies
metrics:
- accuracy
model-index:
- name: xlm-roberta-base-ft-udpos28-fi
---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Finnish

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Test accuracy

Part-of-Speech Tagging on Universal Dependencies v2.8, test accuracy per language:

| Language | Accuracy | Language | Accuracy | Language | Accuracy | Language | Accuracy |
|---|---|---|---|---|---|---|---|
| English | 73.9 | Dutch | 69.8 | German | 69.9 | Italian | 73.2 |
| French | 69.0 | Spanish | 65.6 | Russian | 82.9 | Swedish | 79.8 |
| Norwegian | 75.1 | Danish | 79.1 | Low Saxon | 50.9 | Akkadian | 34.2 |
| Armenian | 88.6 | Welsh | 57.3 | Old East Slavic | 68.4 | Albanian | 68.9 |
| Slovenian | 74.9 | Guajajara | 27.2 | Kurmanji | 70.6 | Turkish | 77.5 |
| Finnish | 93.8 | Indonesian | 77.5 | Ukrainian | 82.5 | Polish | 79.5 |
| Portuguese | 72.1 | Kazakh | 84.1 | Latin | 73.6 | Old French | 50.1 |
| Buryat | 64.6 | Kaapor | 13.8 | Korean | 64.4 | Estonian | 90.0 |
| Croatian | 81.7 | Gothic | 24.8 | Swiss German | 41.8 | Assyrian | 14.6 |
| North Sami | 46.3 | Naija | 36.9 | Latvian | 87.5 | Chinese | 55.9 |
| Tagalog | 62.8 | Bambara | 28.8 | Lithuanian | 87.1 | Galician | 68.3 |
| Vietnamese | 61.7 | Greek | 72.1 | Catalan | 63.7 | Czech | 81.3 |
| Erzya | 51.8 | Bhojpuri | 53.0 | Thai | 58.3 | Marathi | 87.1 |
| Basque | 77.4 | Slovak | 81.2 | Kiche | 39.0 | Yoruba | 30.6 |
| Warlpiri | 49.8 | Tamil | 87.7 | Maltese | 29.4 | Ancient Greek | 64.4 |
| Icelandic | 75.6 | Mbya Guarani | 36.5 | Urdu | 66.1 | Romanian | 71.8 |
| Persian | 67.6 | Apurina | 51.8 | Japanese | 44.1 | Hungarian | 76.1 |
| Hindi | 70.2 | Classical Chinese | 29.6 | Komi Permyak | 54.0 | Faroese | 69.4 |
| Sanskrit | 40.0 | Livvi | 73.1 | Arabic | 70.7 | Wolof | 36.9 |
| Bulgarian | 80.4 | Akuntsu | 35.5 | Makurap | 19.2 | Kangri | 52.2 |
| Breton | 58.0 | Telugu | 86.3 | Cantonese | 54.6 | Old Church Slavonic | 46.6 |
| Karelian | 79.4 | Upper Sorbian | 70.9 | South Levantine Arabic | 66.2 | Komi Zyrian | 47.3 |
| Irish | 57.7 | Nayini | 43.6 | Munduruku | 29.2 | Manx | 32.8 |
| Skolt Sami | 39.4 | Afrikaans | 71.0 | Old Turkish | 37.1 | Tupinamba | 41.0 |
| Belarusian | 83.4 | Serbian | 81.7 | Moksha | 48.7 | Western Armenian | 80.3 |
| Scottish Gaelic | 49.8 | Khunsari | 45.9 | Hebrew | 83.3 | Uyghur | 78.6 |
| Chukchi | 38.6 | | | | | | |

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-fi")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-fi")
```
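The card stops at loading the model; a minimal tagging sketch on top of it, using the generic `token-classification` pipeline (the Finnish example sentence is illustrative, not from the card):

```python
from transformers import pipeline

tagger = pipeline("token-classification", model="wietsedv/xlm-roberta-base-ft-udpos28-fi")

# Each returned dict carries the (sub)token and its predicted POS label.
for item in tagger("Tämä on esimerkkilause."):
    print(item["word"], item["entity"])
```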
wietsedv/xlm-roberta-base-ft-udpos28-gl
99c48108dce99f4163a6f72ab841b3690e9c9c63
2022-02-25T09:58:36.000Z
[ "pytorch", "xlm-roberta", "token-classification", "gl", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-gl
2
null
transformers
24,928
---
language:
- gl
license: apache-2.0
library_name: transformers
tags:
- part-of-speech
- token-classification
datasets:
- universal_dependencies
metrics:
- accuracy
model-index:
- name: xlm-roberta-base-ft-udpos28-gl
---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Galician

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Test accuracy

Part-of-Speech Tagging on Universal Dependencies v2.8, test accuracy per language:

| Language | Accuracy | Language | Accuracy | Language | Accuracy | Language | Accuracy |
|---|---|---|---|---|---|---|---|
| English | 86.5 | Dutch | 87.6 | German | 83.3 | Italian | 88.6 |
| French | 88.3 | Spanish | 86.6 | Russian | 89.2 | Swedish | 87.7 |
| Norwegian | 83.2 | Danish | 87.8 | Low Saxon | 53.1 | Akkadian | 30.7 |
| Armenian | 84.7 | Welsh | 67.1 | Old East Slavic | 73.7 | Albanian | 79.7 |
| Slovenian | 78.4 | Guajajara | 25.8 | Kurmanji | 79.4 | Turkish | 76.8 |
| Finnish | 84.4 | Indonesian | 83.9 | Ukrainian | 86.6 | Polish | 86.8 |
| Portuguese | 90.9 | Kazakh | 81.1 | Latin | 80.0 | Old French | 64.0 |
| Buryat | 58.0 | Kaapor | 18.8 | Korean | 62.5 | Estonian | 85.3 |
| Croatian | 88.3 | Gothic | 22.4 | Swiss German | 47.9 | Assyrian | 14.6 |
| North Sami | 32.1 | Naija | 41.1 | Latvian | 86.5 | Chinese | 32.8 |
| Tagalog | 71.9 | Bambara | 28.8 | Lithuanian | 85.4 | Galician | 93.8 |
| Vietnamese | 63.8 | Greek | 87.6 | Catalan | 87.4 | Czech | 87.6 |
| Erzya | 42.6 | Bhojpuri | 52.0 | Thai | 49.3 | Marathi | 80.4 |
| Basque | 75.8 | Slovak | 87.6 | Kiche | 31.8 | Yoruba | 21.5 |
| Warlpiri | 34.4 | Tamil | 81.6 | Maltese | 25.2 | Ancient Greek | 59.4 |
| Icelandic | 82.0 | Mbya Guarani | 29.2 | Urdu | 64.6 | Romanian | 84.5 |
| Persian | 78.9 | Apurina | 32.8 | Japanese | 20.0 | Hungarian | 83.0 |
| Hindi | 71.8 | Classical Chinese | 14.3 | Komi Permyak | 42.7 | Faroese | 76.8 |
| Sanskrit | 21.0 | Livvi | 62.4 | Arabic | 82.1 | Wolof | 33.2 |
| Bulgarian | 89.5 | Akuntsu | 24.4 | Makurap | 16.4 | Kangri | 43.6 |
| Breton | 66.2 | Telugu | 79.6 | Cantonese | 37.0 | Old Church Slavonic | 49.5 |
| Karelian | 69.5 | Upper Sorbian | 73.2 | South Levantine Arabic | 65.1 | Komi Zyrian | 36.2 |
| Irish | 69.2 | Nayini | 43.6 | Munduruku | 19.7 | Manx | 33.4 |
| Skolt Sami | 30.3 | Afrikaans | 83.3 | Old Turkish | 37.1 | Tupinamba | 26.9 |
| Belarusian | 87.9 | Serbian | 89.8 | Moksha | 38.8 | Western Armenian | 78.1 |
| Scottish Gaelic | 58.7 | Khunsari | 35.1 | Hebrew | 90.6 | Uyghur | 70.7 |
| Chukchi | 28.7 | | | | | | |

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-gl")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-gl")
```
wietsedv/xlm-roberta-base-ft-udpos28-hi
f0533e3c2b3cfaa0e26bf7a35e06811711699c55
2022-02-25T09:58:42.000Z
[ "pytorch", "xlm-roberta", "token-classification", "hi", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-hi
2
null
transformers
24,929
---
language:
- hi
license: apache-2.0
library_name: transformers
tags:
- part-of-speech
- token-classification
datasets:
- universal_dependencies
metrics:
- accuracy
model-index:
- name: xlm-roberta-base-ft-udpos28-hi
---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Hindi

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Test accuracy

Part-of-Speech Tagging on Universal Dependencies v2.8, test accuracy per language:

| Language | Accuracy | Language | Accuracy | Language | Accuracy | Language | Accuracy |
|---|---|---|---|---|---|---|---|
| English | 75.9 | Dutch | 72.3 | German | 69.4 | Italian | 68.1 |
| French | 67.1 | Spanish | 70.2 | Russian | 82.9 | Swedish | 77.4 |
| Norwegian | 72.4 | Danish | 74.9 | Low Saxon | 48.0 | Akkadian | 21.7 |
| Armenian | 82.1 | Welsh | 59.4 | Old East Slavic | 63.6 | Albanian | 68.5 |
| Slovenian | 71.3 | Guajajara | 18.5 | Kurmanji | 71.8 | Turkish | 75.4 |
| Finnish | 80.3 | Indonesian | 76.6 | Ukrainian | 80.8 | Polish | 81.1 |
| Portuguese | 71.5 | Kazakh | 82.0 | Latin | 69.3 | Old French | 44.0 |
| Buryat | 53.9 | Kaapor | 10.8 | Korean | 57.8 | Estonian | 81.0 |
| Croatian | 79.8 | Gothic | 8.6 | Swiss German | 42.2 | Assyrian | 16.3 |
| North Sami | 26.2 | Naija | 35.8 | Latvian | 80.2 | Chinese | 37.1 |
| Tagalog | 71.3 | Bambara | 22.2 | Lithuanian | 81.3 | Galician | 70.7 |
| Vietnamese | 60.6 | Greek | 69.5 | Catalan | 68.7 | Czech | 78.8 |
| Erzya | 36.3 | Bhojpuri | 61.2 | Thai | 52.8 | Marathi | 82.2 |
| Basque | 78.8 | Slovak | 78.9 | Kiche | 21.7 | Yoruba | 19.3 |
| Warlpiri | 23.5 | Tamil | 85.7 | Maltese | 16.3 | Ancient Greek | 54.9 |
| Icelandic | 70.4 | Mbya Guarani | 23.2 | Urdu | 89.7 | Romanian | 72.1 |
| Persian | 78.1 | Apurina | 22.9 | Japanese | 29.3 | Hungarian | 75.4 |
| Hindi | 93.7 | Classical Chinese | 18.4 | Komi Permyak | 34.3 | Faroese | 64.9 |
| Sanskrit | 14.0 | Livvi | 57.9 | Arabic | 73.9 | Wolof | 24.9 |
| Bulgarian | 81.3 | Akuntsu | 16.2 | Makurap | 2.7 | Kangri | 52.8 |
| Breton | 49.5 | Telugu | 85.4 | Cantonese | 42.1 | Old Church Slavonic | 35.1 |
| Karelian | 64.9 | Upper Sorbian | 64.2 | South Levantine Arabic | 60.1 | Komi Zyrian | 29.7 |
| Irish | 56.5 | Nayini | 39.7 | Munduruku | 9.3 | Manx | 25.3 |
| Skolt Sami | 26.9 | Afrikaans | 71.9 | Old Turkish | 43.0 | Tupinamba | 21.3 |
| Belarusian | 80.5 | Serbian | 79.9 | Moksha | 34.3 | Western Armenian | 74.9 |
| Scottish Gaelic | 49.1 | Khunsari | 37.8 | Hebrew | 81.2 | Uyghur | 75.8 |
| Chukchi | 27.0 | | | | | | |

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-hi")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-hi")
```
wietsedv/xlm-roberta-base-ft-udpos28-hy
f23196284ee2f95880abff6aef04815d1f448dcf
2022-02-25T09:58:47.000Z
[ "pytorch", "xlm-roberta", "token-classification", "hy", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-hy
2
null
transformers
24,930
---
language:
- hy
license: apache-2.0
library_name: transformers
tags:
- part-of-speech
- token-classification
datasets:
- universal_dependencies
metrics:
- accuracy
model-index:
- name: xlm-roberta-base-ft-udpos28-hy
---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Armenian

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Test accuracy

Part-of-Speech Tagging on Universal Dependencies v2.8, test accuracy per language:

| Language | Accuracy | Language | Accuracy | Language | Accuracy | Language | Accuracy |
|---|---|---|---|---|---|---|---|
| English | 84.7 | Dutch | 85.3 | German | 84.1 | Italian | 82.9 |
| French | 82.6 | Spanish | 83.2 | Russian | 92.1 | Swedish | 87.5 |
| Norwegian | 82.5 | Danish | 86.6 | Low Saxon | 40.1 | Akkadian | 7.0 |
| Armenian | 97.0 | Welsh | 65.3 | Old East Slavic | 73.6 | Albanian | 75.8 |
| Slovenian | 80.8 | Guajajara | 14.8 | Kurmanji | 77.9 | Turkish | 79.3 |
| Finnish | 86.3 | Indonesian | 80.5 | Ukrainian | 91.0 | Polish | 86.3 |
| Portuguese | 84.6 | Kazakh | 86.3 | Latin | 79.8 | Old French | 47.9 |
| Buryat | 59.5 | Kaapor | 4.6 | Korean | 64.1 | Estonian | 86.1 |
| Croatian | 88.6 | Gothic | 6.5 | Swiss German | 43.7 | Assyrian | 14.6 |
| North Sami | 23.7 | Naija | 36.1 | Latvian | 90.0 | Chinese | 43.5 |
| Tagalog | 71.8 | Bambara | 17.2 | Lithuanian | 89.0 | Galician | 83.6 |
| Vietnamese | 66.4 | Greek | 86.9 | Catalan | 82.3 | Czech | 88.7 |
| Erzya | 40.9 | Bhojpuri | 53.6 | Thai | 67.5 | Marathi | 83.4 |
| Basque | 79.0 | Slovak | 89.5 | Kiche | 19.8 | Yoruba | 15.4 |
| Warlpiri | 25.5 | Tamil | 86.9 | Maltese | 14.7 | Ancient Greek | 67.4 |
| Icelandic | 82.2 | Mbya Guarani | 22.8 | Urdu | 70.6 | Romanian | 82.4 |
| Persian | 79.2 | Apurina | 25.2 | Japanese | 30.3 | Hungarian | 85.7 |
| Hindi | 75.7 | Classical Chinese | 26.3 | Komi Permyak | 38.3 | Faroese | 76.5 |
| Sanskrit | 23.7 | Livvi | 58.1 | Arabic | 78.6 | Wolof | 16.3 |
| Bulgarian | 90.3 | Akuntsu | 11.6 | Makurap | 1.4 | Kangri | 51.3 |
| Breton | 65.5 | Telugu | 85.6 | Cantonese | 48.2 | Old Church Slavonic | 44.4 |
| Karelian | 67.7 | Upper Sorbian | 69.5 | South Levantine Arabic | 69.6 | Komi Zyrian | 33.0 |
| Irish | 62.4 | Nayini | 48.7 | Munduruku | 7.6 | Manx | 19.6 |
| Skolt Sami | 26.8 | Afrikaans | 83.9 | Old Turkish | 37.1 | Tupinamba | 20.9 |
| Belarusian | 91.9 | Serbian | 89.7 | Moksha | 40.7 | Western Armenian | 84.5 |
| Scottish Gaelic | 56.9 | Khunsari | 43.2 | Hebrew | 91.7 | Uyghur | 78.1 |
| Chukchi | 33.2 | | | | | | |

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-hy")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-hy")
```
wietsedv/xlm-roberta-base-ft-udpos28-it
8cc7a77159511e194c6ee0dce8151fdfbdc2bf30
2022-02-25T09:58:53.000Z
[ "pytorch", "xlm-roberta", "token-classification", "it", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-it
2
null
transformers
24,931
---
language:
- it
license: apache-2.0
library_name: transformers
tags:
- part-of-speech
- token-classification
datasets:
- universal_dependencies
metrics:
- accuracy
model-index:
- name: xlm-roberta-base-ft-udpos28-it
---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Italian

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Test accuracy

Part-of-Speech Tagging on Universal Dependencies v2.8, test accuracy per language:

| Language | Accuracy | Language | Accuracy | Language | Accuracy | Language | Accuracy |
|---|---|---|---|---|---|---|---|
| English | 89.1 | Dutch | 89.4 | German | 83.4 | Italian | 96.3 |
| French | 92.2 | Spanish | 94.0 | Russian | 90.5 | Swedish | 91.1 |
| Norwegian | 84.7 | Danish | 91.6 | Low Saxon | 48.7 | Akkadian | 21.8 |
| Armenian | 87.6 | Welsh | 66.4 | Old East Slavic | 76.9 | Albanian | 81.2 |
| Slovenian | 79.1 | Guajajara | 20.3 | Kurmanji | 78.2 | Turkish | 77.0 |
| Finnish | 86.0 | Indonesian | 86.4 | Ukrainian | 88.1 | Polish | 86.9 |
| Portuguese | 92.8 | Kazakh | 82.8 | Latin | 79.8 | Old French | 62.7 |
| Buryat | 55.2 | Kaapor | 11.7 | Korean | 63.5 | Estonian | 87.9 |
| Croatian | 89.0 | Gothic | 12.0 | Swiss German | 40.8 | Assyrian | 14.3 |
| North Sami | 23.7 | Naija | 36.4 | Latvian | 87.1 | Chinese | 39.9 |
| Tagalog | 72.3 | Bambara | 23.2 | Lithuanian | 85.8 | Galician | 89.5 |
| Vietnamese | 66.5 | Greek | 87.2 | Catalan | 93.7 | Czech | 89.1 |
| Erzya | 39.4 | Bhojpuri | 48.3 | Thai | 55.8 | Marathi | 85.3 |
| Basque | 77.1 | Slovak | 89.7 | Kiche | 30.7 | Yoruba | 18.1 |
| Warlpiri | 25.9 | Tamil | 83.7 | Maltese | 20.3 | Ancient Greek | 64.0 |
| Icelandic | 84.0 | Mbya Guarani | 23.9 | Urdu | 66.0 | Romanian | 86.4 |
| Persian | 78.3 | Apurina | 26.1 | Japanese | 23.9 | Hungarian | 86.3 |
| Hindi | 69.8 | Classical Chinese | 26.7 | Komi Permyak | 39.8 | Faroese | 76.9 |
| Sanskrit | 20.1 | Livvi | 57.3 | Arabic | 81.4 | Wolof | 25.4 |
| Bulgarian | 90.7 | Akuntsu | 19.8 | Makurap | 6.2 | Kangri | 45.2 |
| Breton | 64.8 | Telugu | 85.2 | Cantonese | 50.8 | Old Church Slavonic | 51.6 |
| Karelian | 69.9 | Upper Sorbian | 73.8 | South Levantine Arabic | 64.9 | Komi Zyrian | 32.8 |
| Irish | 65.2 | Nayini | 42.3 | Munduruku | 9.9 | Manx | 26.7 |
| Skolt Sami | 27.6 | Afrikaans | 86.5 | Old Turkish | 37.1 | Tupinamba | 22.3 |
| Belarusian | 89.1 | Serbian | 90.9 | Moksha | 37.4 | Western Armenian | 79.2 |
| Scottish Gaelic | 57.4 | Khunsari | 39.2 | Hebrew | 92.7 | Uyghur | 73.4 |
| Chukchi | 30.9 | | | | | | |

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-it")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-it")
```
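As an alternative to the pipeline shown for the Finnish checkpoint above, a sketch of decoding predictions by hand: run a forward pass, take the argmax over the tag logits, and map ids to labels through `model.config.id2label` (the Italian example sentence is illustrative, not from the card):

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-it")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-it")

inputs = tokenizer("Questa è una frase di esempio.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, seq_len, num_labels)

# Print each subword token with its predicted label.
for token_id, pred in zip(inputs["input_ids"][0], logits.argmax(dim=-1)[0]):
    print(tokenizer.convert_ids_to_tokens(token_id.item()),
          model.config.id2label[pred.item()])
```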
wietsedv/xlm-roberta-base-ft-udpos28-no
8b329ecdd156f8424b925ddd5472f508ab0f169a
2022-02-25T09:59:08.000Z
[ "pytorch", "xlm-roberta", "token-classification", "no", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-no
2
null
transformers
24,932
---
language:
- no
license: apache-2.0
library_name: transformers
tags:
- part-of-speech
- token-classification
datasets:
- universal_dependencies
metrics:
- accuracy
model-index:
- name: xlm-roberta-base-ft-udpos28-no
---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Norwegian

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Test accuracy

Part-of-Speech Tagging on Universal Dependencies v2.8, test accuracy per language:

| Language | Accuracy | Language | Accuracy | Language | Accuracy | Language | Accuracy |
|---|---|---|---|---|---|---|---|
| English | 89.7 | Dutch | 89.3 | German | 87.8 | Italian | 85.0 |
| French | 83.9 | Spanish | 88.4 | Russian | 89.4 | Swedish | 92.1 |
| Norwegian | 97.1 | Danish | 89.0 | Low Saxon | 56.5 | Akkadian | 32.3 |
| Armenian | 86.2 | Welsh | 67.9 | Old East Slavic | 73.9 | Albanian | 79.0 |
| Slovenian | 78.9 | Guajajara | 26.9 | Kurmanji | 75.1 | Turkish | 77.8 |
| Finnish | 85.2 | Indonesian | 85.9 | Ukrainian | 87.6 | Polish | 87.0 |
| Portuguese | 88.0 | Kazakh | 82.9 | Latin | 78.9 | Old French | 51.2 |
| Buryat | 61.0 | Kaapor | 13.8 | Korean | 62.8 | Estonian | 87.9 |
| Croatian | 88.8 | Gothic | 25.8 | Swiss German | 44.0 | Assyrian | 15.0 |
| North Sami | 43.0 | Naija | 41.5 | Latvian | 85.2 | Chinese | 46.6 |
| Tagalog | 73.1 | Bambara | 29.0 | Lithuanian | 84.1 | Galician | 84.9 |
| Vietnamese | 66.4 | Greek | 83.0 | Catalan | 88.8 | Czech | 87.3 |
| Erzya | 50.3 | Bhojpuri | 52.0 | Thai | 65.6 | Marathi | 89.0 |
| Basque | 74.5 | Slovak | 88.8 | Kiche | 35.4 | Yoruba | 28.2 |
| Warlpiri | 39.3 | Tamil | 83.5 | Maltese | 30.4 | Ancient Greek | 63.7 |
| Icelandic | 84.3 | Mbya Guarani | 32.9 | Urdu | 69.4 | Romanian | 83.8 |
| Persian | 78.6 | Apurina | 45.4 | Japanese | 33.2 | Hungarian | 84.5 |
| Hindi | 74.9 | Classical Chinese | 31.3 | Komi Permyak | 50.9 | Faroese | 80.8 |
| Sanskrit | 35.6 | Livvi | 67.6 | Arabic | 80.4 | Wolof | 35.5 |
| Bulgarian | 90.7 | Akuntsu | 32.9 | Makurap | 17.8 | Kangri | 48.1 |
| Breton | 61.9 | Telugu | 85.3 | Cantonese | 50.1 | Old Church Slavonic | 47.8 |
| Karelian | 71.8 | Upper Sorbian | 78.4 | South Levantine Arabic | 67.3 | Komi Zyrian | 44.4 |
| Irish | 69.9 | Nayini | 41.0 | Munduruku | 21.6 | Manx | 35.0 |
| Skolt Sami | 38.9 | Afrikaans | 86.7 | Old Turkish | 37.1 | Tupinamba | 40.4 |
| Belarusian | 88.2 | Serbian | 89.9 | Moksha | 47.4 | Western Armenian | 78.4 |
| Scottish Gaelic | 58.3 | Khunsari | 43.2 | Hebrew | 89.6 | Uyghur | 76.5 |
| Chukchi | 37.9 | | | | | | |

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-no")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-no")
```
wietsedv/xlm-roberta-base-ft-udpos28-ro
566c9f3700d2a7303cf80c6eb1ed0aba1c346540
2022-02-25T09:59:16.000Z
[ "pytorch", "xlm-roberta", "token-classification", "ro", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-ro
2
null
transformers
24,933
--- language: - ro license: apache-2.0 library_name: transformers tags: - part-of-speech - token-classification datasets: - universal_dependencies metrics: - accuracy model-index: - name: xlm-roberta-base-ft-udpos28-ro results: - task: type: token-classification name: Part-of-Speech Tagging dataset: type: universal_dependencies name: Universal Dependencies v2.8 metrics: - type: accuracy name: English Test accuracy value: 88.4 - type: accuracy name: Dutch Test accuracy value: 86.1 - type: accuracy name: German Test accuracy value: 87.3 - type: accuracy name: Italian Test accuracy value: 88.2 - type: accuracy name: French Test accuracy value: 91.3 - type: accuracy name: Spanish Test accuracy value: 91.1 - type: accuracy name: Russian Test accuracy value: 90.4 - type: accuracy name: Swedish Test accuracy value: 90.7 - type: accuracy name: Norwegian Test accuracy value: 85.0 - type: accuracy name: Danish Test accuracy value: 91.0 - type: accuracy name: Low Saxon Test accuracy value: 56.2 - type: accuracy name: Akkadian Test accuracy value: 41.8 - type: accuracy name: Armenian Test accuracy value: 88.4 - type: accuracy name: Welsh Test accuracy value: 71.7 - type: accuracy name: Old East Slavic Test accuracy value: 78.7 - type: accuracy name: Albanian Test accuracy value: 90.2 - type: accuracy name: Slovenian Test accuracy value: 80.3 - type: accuracy name: Guajajara Test accuracy value: 39.3 - type: accuracy name: Kurmanji Test accuracy value: 79.5 - type: accuracy name: Turkish Test accuracy value: 79.5 - type: accuracy name: Finnish Test accuracy value: 86.0 - type: accuracy name: Indonesian Test accuracy value: 84.2 - type: accuracy name: Ukrainian Test accuracy value: 89.7 - type: accuracy name: Polish Test accuracy value: 89.5 - type: accuracy name: Portuguese Test accuracy value: 90.3 - type: accuracy name: Kazakh Test accuracy value: 85.0 - type: accuracy name: Latin Test accuracy value: 81.8 - type: accuracy name: Old French Test accuracy value: 65.7 - type: accuracy name: Buryat Test accuracy value: 64.9 - type: accuracy name: Kaapor Test accuracy value: 27.1 - type: accuracy name: Korean Test accuracy value: 64.3 - type: accuracy name: Estonian Test accuracy value: 87.5 - type: accuracy name: Croatian Test accuracy value: 89.7 - type: accuracy name: Gothic Test accuracy value: 35.1 - type: accuracy name: Swiss German Test accuracy value: 55.5 - type: accuracy name: Assyrian Test accuracy value: 16.8 - type: accuracy name: North Sami Test accuracy value: 45.0 - type: accuracy name: Naija Test accuracy value: 43.8 - type: accuracy name: Latvian Test accuracy value: 89.5 - type: accuracy name: Chinese Test accuracy value: 54.9 - type: accuracy name: Tagalog Test accuracy value: 74.0 - type: accuracy name: Bambara Test accuracy value: 32.9 - type: accuracy name: Lithuanian Test accuracy value: 87.7 - type: accuracy name: Galician Test accuracy value: 89.9 - type: accuracy name: Vietnamese Test accuracy value: 66.2 - type: accuracy name: Greek Test accuracy value: 88.9 - type: accuracy name: Catalan Test accuracy value: 90.0 - type: accuracy name: Czech Test accuracy value: 89.8 - type: accuracy name: Erzya Test accuracy value: 51.5 - type: accuracy name: Bhojpuri Test accuracy value: 55.0 - type: accuracy name: Thai Test accuracy value: 64.9 - type: accuracy name: Marathi Test accuracy value: 87.1 - type: accuracy name: Basque Test accuracy value: 80.7 - type: accuracy name: Slovak Test accuracy value: 89.8 - type: accuracy name: Kiche Test accuracy value: 42.4 - type: accuracy name: 
Yoruba Test accuracy value: 30.3 - type: accuracy name: Warlpiri Test accuracy value: 46.2 - type: accuracy name: Tamil Test accuracy value: 82.5 - type: accuracy name: Maltese Test accuracy value: 38.3 - type: accuracy name: Ancient Greek Test accuracy value: 67.8 - type: accuracy name: Icelandic Test accuracy value: 85.1 - type: accuracy name: Mbya Guarani Test accuracy value: 34.4 - type: accuracy name: Urdu Test accuracy value: 63.4 - type: accuracy name: Romanian Test accuracy value: 96.8 - type: accuracy name: Persian Test accuracy value: 79.0 - type: accuracy name: Apurina Test accuracy value: 43.1 - type: accuracy name: Japanese Test accuracy value: 43.7 - type: accuracy name: Hungarian Test accuracy value: 79.9 - type: accuracy name: Hindi Test accuracy value: 70.6 - type: accuracy name: Classical Chinese Test accuracy value: 40.8 - type: accuracy name: Komi Permyak Test accuracy value: 57.2 - type: accuracy name: Faroese Test accuracy value: 80.9 - type: accuracy name: Sanskrit Test accuracy value: 40.4 - type: accuracy name: Livvi Test accuracy value: 66.9 - type: accuracy name: Arabic Test accuracy value: 83.5 - type: accuracy name: Wolof Test accuracy value: 43.1 - type: accuracy name: Bulgarian Test accuracy value: 91.2 - type: accuracy name: Akuntsu Test accuracy value: 40.6 - type: accuracy name: Makurap Test accuracy value: 20.5 - type: accuracy name: Kangri Test accuracy value: 53.7 - type: accuracy name: Breton Test accuracy value: 68.7 - type: accuracy name: Telugu Test accuracy value: 82.9 - type: accuracy name: Cantonese Test accuracy value: 57.0 - type: accuracy name: Old Church Slavonic Test accuracy value: 59.1 - type: accuracy name: Karelian Test accuracy value: 75.0 - type: accuracy name: Upper Sorbian Test accuracy value: 77.8 - type: accuracy name: South Levantine Arabic Test accuracy value: 71.2 - type: accuracy name: Komi Zyrian Test accuracy value: 47.0 - type: accuracy name: Irish Test accuracy value: 69.4 - type: accuracy name: Nayini Test accuracy value: 56.4 - type: accuracy name: Munduruku Test accuracy value: 29.2 - type: accuracy name: Manx Test accuracy value: 38.8 - type: accuracy name: Skolt Sami Test accuracy value: 43.7 - type: accuracy name: Afrikaans Test accuracy value: 88.2 - type: accuracy name: Old Turkish Test accuracy value: 37.1 - type: accuracy name: Tupinamba Test accuracy value: 44.5 - type: accuracy name: Belarusian Test accuracy value: 90.4 - type: accuracy name: Serbian Test accuracy value: 89.5 - type: accuracy name: Moksha Test accuracy value: 49.1 - type: accuracy name: Western Armenian Test accuracy value: 82.0 - type: accuracy name: Scottish Gaelic Test accuracy value: 63.1 - type: accuracy name: Khunsari Test accuracy value: 47.3 - type: accuracy name: Hebrew Test accuracy value: 88.5 - type: accuracy name: Uyghur Test accuracy value: 78.0 - type: accuracy name: Chukchi Test accuracy value: 37.5 ---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Romanian

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-ro")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-ro")
```
wietsedv/xlm-roberta-base-ft-udpos28-sa
f8a69a0f6883dcb1331c62f53c49889e69699344
2022-02-25T09:59:19.000Z
[ "pytorch", "xlm-roberta", "token-classification", "sa", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-sa
2
null
transformers
24,934
--- language: - sa license: apache-2.0 library_name: transformers tags: - part-of-speech - token-classification datasets: - universal_dependencies metrics: - accuracy model-index: - name: xlm-roberta-base-ft-udpos28-sa results: - task: type: token-classification name: Part-of-Speech Tagging dataset: type: universal_dependencies name: Universal Dependencies v2.8 metrics: - type: accuracy name: English Test accuracy value: 31.4 - type: accuracy name: Dutch Test accuracy value: 28.4 - type: accuracy name: German Test accuracy value: 32.3 - type: accuracy name: Italian Test accuracy value: 28.3 - type: accuracy name: French Test accuracy value: 28.1 - type: accuracy name: Spanish Test accuracy value: 28.5 - type: accuracy name: Russian Test accuracy value: 37.5 - type: accuracy name: Swedish Test accuracy value: 35.7 - type: accuracy name: Norwegian Test accuracy value: 32.0 - type: accuracy name: Danish Test accuracy value: 32.7 - type: accuracy name: Low Saxon Test accuracy value: 28.0 - type: accuracy name: Akkadian Test accuracy value: 26.2 - type: accuracy name: Armenian Test accuracy value: 39.0 - type: accuracy name: Welsh Test accuracy value: 23.9 - type: accuracy name: Old East Slavic Test accuracy value: 36.8 - type: accuracy name: Albanian Test accuracy value: 34.1 - type: accuracy name: Slovenian Test accuracy value: 30.4 - type: accuracy name: Guajajara Test accuracy value: 16.6 - type: accuracy name: Kurmanji Test accuracy value: 34.8 - type: accuracy name: Turkish Test accuracy value: 42.8 - type: accuracy name: Finnish Test accuracy value: 42.5 - type: accuracy name: Indonesian Test accuracy value: 34.5 - type: accuracy name: Ukrainian Test accuracy value: 38.2 - type: accuracy name: Polish Test accuracy value: 36.6 - type: accuracy name: Portuguese Test accuracy value: 30.7 - type: accuracy name: Kazakh Test accuracy value: 44.2 - type: accuracy name: Latin Test accuracy value: 38.1 - type: accuracy name: Old French Test accuracy value: 35.3 - type: accuracy name: Buryat Test accuracy value: 33.0 - type: accuracy name: Kaapor Test accuracy value: 29.2 - type: accuracy name: Korean Test accuracy value: 39.6 - type: accuracy name: Estonian Test accuracy value: 41.1 - type: accuracy name: Croatian Test accuracy value: 34.9 - type: accuracy name: Gothic Test accuracy value: 26.7 - type: accuracy name: Swiss German Test accuracy value: 23.6 - type: accuracy name: Assyrian Test accuracy value: 9.7 - type: accuracy name: North Sami Test accuracy value: 21.7 - type: accuracy name: Naija Test accuracy value: 24.0 - type: accuracy name: Latvian Test accuracy value: 42.3 - type: accuracy name: Chinese Test accuracy value: 29.3 - type: accuracy name: Tagalog Test accuracy value: 34.6 - type: accuracy name: Bambara Test accuracy value: 12.0 - type: accuracy name: Lithuanian Test accuracy value: 43.5 - type: accuracy name: Galician Test accuracy value: 28.7 - type: accuracy name: Vietnamese Test accuracy value: 36.4 - type: accuracy name: Greek Test accuracy value: 32.5 - type: accuracy name: Catalan Test accuracy value: 25.7 - type: accuracy name: Czech Test accuracy value: 36.8 - type: accuracy name: Erzya Test accuracy value: 20.0 - type: accuracy name: Bhojpuri Test accuracy value: 27.3 - type: accuracy name: Thai Test accuracy value: 32.4 - type: accuracy name: Marathi Test accuracy value: 37.4 - type: accuracy name: Basque Test accuracy value: 38.3 - type: accuracy name: Slovak Test accuracy value: 37.2 - type: accuracy name: Kiche Test accuracy value: 17.2 - type: accuracy name: 
Yoruba Test accuracy value: 13.2 - type: accuracy name: Warlpiri Test accuracy value: 21.5 - type: accuracy name: Tamil Test accuracy value: 42.5 - type: accuracy name: Maltese Test accuracy value: 17.5 - type: accuracy name: Ancient Greek Test accuracy value: 37.4 - type: accuracy name: Icelandic Test accuracy value: 32.7 - type: accuracy name: Mbya Guarani Test accuracy value: 13.9 - type: accuracy name: Urdu Test accuracy value: 28.1 - type: accuracy name: Romanian Test accuracy value: 34.8 - type: accuracy name: Persian Test accuracy value: 36.2 - type: accuracy name: Apurina Test accuracy value: 21.9 - type: accuracy name: Japanese Test accuracy value: 26.3 - type: accuracy name: Hungarian Test accuracy value: 34.6 - type: accuracy name: Hindi Test accuracy value: 29.3 - type: accuracy name: Classical Chinese Test accuracy value: 30.0 - type: accuracy name: Komi Permyak Test accuracy value: 26.1 - type: accuracy name: Faroese Test accuracy value: 24.8 - type: accuracy name: Sanskrit Test accuracy value: 84.2 - type: accuracy name: Livvi Test accuracy value: 29.7 - type: accuracy name: Arabic Test accuracy value: 32.6 - type: accuracy name: Wolof Test accuracy value: 16.7 - type: accuracy name: Bulgarian Test accuracy value: 35.4 - type: accuracy name: Akuntsu Test accuracy value: 23.9 - type: accuracy name: Makurap Test accuracy value: 14.4 - type: accuracy name: Kangri Test accuracy value: 27.8 - type: accuracy name: Breton Test accuracy value: 27.6 - type: accuracy name: Telugu Test accuracy value: 50.6 - type: accuracy name: Cantonese Test accuracy value: 31.6 - type: accuracy name: Old Church Slavonic Test accuracy value: 43.2 - type: accuracy name: Karelian Test accuracy value: 34.1 - type: accuracy name: Upper Sorbian Test accuracy value: 28.5 - type: accuracy name: South Levantine Arabic Test accuracy value: 30.8 - type: accuracy name: Komi Zyrian Test accuracy value: 25.5 - type: accuracy name: Irish Test accuracy value: 20.8 - type: accuracy name: Nayini Test accuracy value: 29.5 - type: accuracy name: Munduruku Test accuracy value: 15.6 - type: accuracy name: Manx Test accuracy value: 15.9 - type: accuracy name: Skolt Sami Test accuracy value: 18.9 - type: accuracy name: Afrikaans Test accuracy value: 34.5 - type: accuracy name: Old Turkish Test accuracy value: 6.3 - type: accuracy name: Tupinamba Test accuracy value: 25.2 - type: accuracy name: Belarusian Test accuracy value: 39.3 - type: accuracy name: Serbian Test accuracy value: 33.7 - type: accuracy name: Moksha Test accuracy value: 21.8 - type: accuracy name: Western Armenian Test accuracy value: 38.3 - type: accuracy name: Scottish Gaelic Test accuracy value: 23.3 - type: accuracy name: Khunsari Test accuracy value: 29.7 - type: accuracy name: Hebrew Test accuracy value: 39.6 - type: accuracy name: Uyghur Test accuracy value: 50.1 - type: accuracy name: Chukchi Test accuracy value: 14.8 ---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Sanskrit

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-sa")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-sa")
```
wietsedv/xlm-roberta-base-ft-udpos28-sk
c56c2d068c8d7098a6e6cbb0ed8a5689815b2f68
2022-02-25T09:59:20.000Z
[ "pytorch", "xlm-roberta", "token-classification", "sk", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-sk
2
null
transformers
24,935
--- language: - sk license: apache-2.0 library_name: transformers tags: - part-of-speech - token-classification datasets: - universal_dependencies metrics: - accuracy model-index: - name: xlm-roberta-base-ft-udpos28-sk results: - task: type: token-classification name: Part-of-Speech Tagging dataset: type: universal_dependencies name: Universal Dependencies v2.8 metrics: - type: accuracy name: English Test accuracy value: 82.6 - type: accuracy name: Dutch Test accuracy value: 84.2 - type: accuracy name: German Test accuracy value: 79.4 - type: accuracy name: Italian Test accuracy value: 82.0 - type: accuracy name: French Test accuracy value: 83.9 - type: accuracy name: Spanish Test accuracy value: 87.9 - type: accuracy name: Russian Test accuracy value: 90.5 - type: accuracy name: Swedish Test accuracy value: 84.6 - type: accuracy name: Norwegian Test accuracy value: 77.9 - type: accuracy name: Danish Test accuracy value: 82.2 - type: accuracy name: Low Saxon Test accuracy value: 53.9 - type: accuracy name: Akkadian Test accuracy value: 35.8 - type: accuracy name: Armenian Test accuracy value: 83.8 - type: accuracy name: Welsh Test accuracy value: 64.8 - type: accuracy name: Old East Slavic Test accuracy value: 74.9 - type: accuracy name: Albanian Test accuracy value: 77.9 - type: accuracy name: Slovenian Test accuracy value: 87.7 - type: accuracy name: Guajajara Test accuracy value: 36.6 - type: accuracy name: Kurmanji Test accuracy value: 76.5 - type: accuracy name: Turkish Test accuracy value: 75.1 - type: accuracy name: Finnish Test accuracy value: 79.5 - type: accuracy name: Indonesian Test accuracy value: 81.3 - type: accuracy name: Ukrainian Test accuracy value: 92.0 - type: accuracy name: Polish Test accuracy value: 93.3 - type: accuracy name: Portuguese Test accuracy value: 85.1 - type: accuracy name: Kazakh Test accuracy value: 79.5 - type: accuracy name: Latin Test accuracy value: 77.1 - type: accuracy name: Old French Test accuracy value: 58.0 - type: accuracy name: Buryat Test accuracy value: 60.6 - type: accuracy name: Kaapor Test accuracy value: 22.1 - type: accuracy name: Korean Test accuracy value: 57.4 - type: accuracy name: Estonian Test accuracy value: 80.7 - type: accuracy name: Croatian Test accuracy value: 93.7 - type: accuracy name: Gothic Test accuracy value: 28.3 - type: accuracy name: Swiss German Test accuracy value: 44.1 - type: accuracy name: Assyrian Test accuracy value: 14.8 - type: accuracy name: North Sami Test accuracy value: 40.6 - type: accuracy name: Naija Test accuracy value: 39.9 - type: accuracy name: Latvian Test accuracy value: 84.2 - type: accuracy name: Chinese Test accuracy value: 42.5 - type: accuracy name: Tagalog Test accuracy value: 70.8 - type: accuracy name: Bambara Test accuracy value: 28.8 - type: accuracy name: Lithuanian Test accuracy value: 85.8 - type: accuracy name: Galician Test accuracy value: 86.1 - type: accuracy name: Vietnamese Test accuracy value: 67.4 - type: accuracy name: Greek Test accuracy value: 84.6 - type: accuracy name: Catalan Test accuracy value: 85.8 - type: accuracy name: Czech Test accuracy value: 94.3 - type: accuracy name: Erzya Test accuracy value: 49.8 - type: accuracy name: Bhojpuri Test accuracy value: 48.1 - type: accuracy name: Thai Test accuracy value: 58.1 - type: accuracy name: Marathi Test accuracy value: 87.7 - type: accuracy name: Basque Test accuracy value: 74.0 - type: accuracy name: Slovak Test accuracy value: 97.5 - type: accuracy name: Kiche Test accuracy value: 33.9 - type: accuracy name: 
Yoruba Test accuracy value: 26.9 - type: accuracy name: Warlpiri Test accuracy value: 42.1 - type: accuracy name: Tamil Test accuracy value: 83.0 - type: accuracy name: Maltese Test accuracy value: 29.1 - type: accuracy name: Ancient Greek Test accuracy value: 59.0 - type: accuracy name: Icelandic Test accuracy value: 77.4 - type: accuracy name: Mbya Guarani Test accuracy value: 33.1 - type: accuracy name: Urdu Test accuracy value: 62.2 - type: accuracy name: Romanian Test accuracy value: 81.4 - type: accuracy name: Persian Test accuracy value: 77.9 - type: accuracy name: Apurina Test accuracy value: 46.7 - type: accuracy name: Japanese Test accuracy value: 27.4 - type: accuracy name: Hungarian Test accuracy value: 81.9 - type: accuracy name: Hindi Test accuracy value: 65.3 - type: accuracy name: Classical Chinese Test accuracy value: 30.2 - type: accuracy name: Komi Permyak Test accuracy value: 48.7 - type: accuracy name: Faroese Test accuracy value: 75.4 - type: accuracy name: Sanskrit Test accuracy value: 36.3 - type: accuracy name: Livvi Test accuracy value: 64.9 - type: accuracy name: Arabic Test accuracy value: 79.6 - type: accuracy name: Wolof Test accuracy value: 39.0 - type: accuracy name: Bulgarian Test accuracy value: 90.5 - type: accuracy name: Akuntsu Test accuracy value: 39.1 - type: accuracy name: Makurap Test accuracy value: 24.7 - type: accuracy name: Kangri Test accuracy value: 49.9 - type: accuracy name: Breton Test accuracy value: 61.8 - type: accuracy name: Telugu Test accuracy value: 79.6 - type: accuracy name: Cantonese Test accuracy value: 45.6 - type: accuracy name: Old Church Slavonic Test accuracy value: 45.9 - type: accuracy name: Karelian Test accuracy value: 67.9 - type: accuracy name: Upper Sorbian Test accuracy value: 78.6 - type: accuracy name: South Levantine Arabic Test accuracy value: 66.7 - type: accuracy name: Komi Zyrian Test accuracy value: 44.2 - type: accuracy name: Irish Test accuracy value: 67.2 - type: accuracy name: Nayini Test accuracy value: 43.6 - type: accuracy name: Munduruku Test accuracy value: 27.3 - type: accuracy name: Manx Test accuracy value: 36.8 - type: accuracy name: Skolt Sami Test accuracy value: 41.3 - type: accuracy name: Afrikaans Test accuracy value: 79.2 - type: accuracy name: Old Turkish Test accuracy value: 38.0 - type: accuracy name: Tupinamba Test accuracy value: 40.3 - type: accuracy name: Belarusian Test accuracy value: 89.8 - type: accuracy name: Serbian Test accuracy value: 94.6 - type: accuracy name: Moksha Test accuracy value: 48.2 - type: accuracy name: Western Armenian Test accuracy value: 76.0 - type: accuracy name: Scottish Gaelic Test accuracy value: 57.0 - type: accuracy name: Khunsari Test accuracy value: 37.8 - type: accuracy name: Hebrew Test accuracy value: 81.2 - type: accuracy name: Uyghur Test accuracy value: 72.4 - type: accuracy name: Chukchi Test accuracy value: 37.0 ---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Slovak

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-sk")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-sk")
```
wietsedv/xlm-roberta-base-ft-udpos28-sr
291ad99ad23f5aa778290b670d5a55a0ef4eeba7
2022-02-25T09:59:25.000Z
[ "pytorch", "xlm-roberta", "token-classification", "sr", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-sr
2
null
transformers
24,936
--- language: - sr license: apache-2.0 library_name: transformers tags: - part-of-speech - token-classification datasets: - universal_dependencies metrics: - accuracy model-index: - name: xlm-roberta-base-ft-udpos28-sr results: - task: type: token-classification name: Part-of-Speech Tagging dataset: type: universal_dependencies name: Universal Dependencies v2.8 metrics: - type: accuracy name: English Test accuracy value: 82.9 - type: accuracy name: Dutch Test accuracy value: 84.0 - type: accuracy name: German Test accuracy value: 82.7 - type: accuracy name: Italian Test accuracy value: 82.6 - type: accuracy name: French Test accuracy value: 83.6 - type: accuracy name: Spanish Test accuracy value: 87.3 - type: accuracy name: Russian Test accuracy value: 90.6 - type: accuracy name: Swedish Test accuracy value: 85.5 - type: accuracy name: Norwegian Test accuracy value: 79.0 - type: accuracy name: Danish Test accuracy value: 84.1 - type: accuracy name: Low Saxon Test accuracy value: 47.9 - type: accuracy name: Akkadian Test accuracy value: 30.2 - type: accuracy name: Armenian Test accuracy value: 84.2 - type: accuracy name: Welsh Test accuracy value: 67.4 - type: accuracy name: Old East Slavic Test accuracy value: 75.9 - type: accuracy name: Albanian Test accuracy value: 74.6 - type: accuracy name: Slovenian Test accuracy value: 85.8 - type: accuracy name: Guajajara Test accuracy value: 25.6 - type: accuracy name: Kurmanji Test accuracy value: 75.8 - type: accuracy name: Turkish Test accuracy value: 76.2 - type: accuracy name: Finnish Test accuracy value: 81.7 - type: accuracy name: Indonesian Test accuracy value: 80.5 - type: accuracy name: Ukrainian Test accuracy value: 92.3 - type: accuracy name: Polish Test accuracy value: 91.8 - type: accuracy name: Portuguese Test accuracy value: 84.7 - type: accuracy name: Kazakh Test accuracy value: 79.7 - type: accuracy name: Latin Test accuracy value: 77.0 - type: accuracy name: Old French Test accuracy value: 54.3 - type: accuracy name: Buryat Test accuracy value: 58.6 - type: accuracy name: Kaapor Test accuracy value: 14.6 - type: accuracy name: Korean Test accuracy value: 60.6 - type: accuracy name: Estonian Test accuracy value: 84.4 - type: accuracy name: Croatian Test accuracy value: 97.0 - type: accuracy name: Gothic Test accuracy value: 17.1 - type: accuracy name: Swiss German Test accuracy value: 42.9 - type: accuracy name: Assyrian Test accuracy value: 16.1 - type: accuracy name: North Sami Test accuracy value: 31.2 - type: accuracy name: Naija Test accuracy value: 38.7 - type: accuracy name: Latvian Test accuracy value: 85.1 - type: accuracy name: Chinese Test accuracy value: 41.3 - type: accuracy name: Tagalog Test accuracy value: 77.5 - type: accuracy name: Bambara Test accuracy value: 27.6 - type: accuracy name: Lithuanian Test accuracy value: 85.3 - type: accuracy name: Galician Test accuracy value: 84.9 - type: accuracy name: Vietnamese Test accuracy value: 65.8 - type: accuracy name: Greek Test accuracy value: 83.9 - type: accuracy name: Catalan Test accuracy value: 85.7 - type: accuracy name: Czech Test accuracy value: 94.8 - type: accuracy name: Erzya Test accuracy value: 43.1 - type: accuracy name: Bhojpuri Test accuracy value: 47.9 - type: accuracy name: Thai Test accuracy value: 60.5 - type: accuracy name: Marathi Test accuracy value: 84.0 - type: accuracy name: Basque Test accuracy value: 74.9 - type: accuracy name: Slovak Test accuracy value: 94.6 - type: accuracy name: Kiche Test accuracy value: 31.5 - type: accuracy name: 
Yoruba Test accuracy value: 21.8 - type: accuracy name: Warlpiri Test accuracy value: 37.7 - type: accuracy name: Tamil Test accuracy value: 83.9 - type: accuracy name: Maltese Test accuracy value: 22.7 - type: accuracy name: Ancient Greek Test accuracy value: 59.0 - type: accuracy name: Icelandic Test accuracy value: 79.6 - type: accuracy name: Mbya Guarani Test accuracy value: 29.4 - type: accuracy name: Urdu Test accuracy value: 63.0 - type: accuracy name: Romanian Test accuracy value: 82.1 - type: accuracy name: Persian Test accuracy value: 78.7 - type: accuracy name: Apurina Test accuracy value: 30.1 - type: accuracy name: Japanese Test accuracy value: 28.7 - type: accuracy name: Hungarian Test accuracy value: 78.4 - type: accuracy name: Hindi Test accuracy value: 66.6 - type: accuracy name: Classical Chinese Test accuracy value: 27.3 - type: accuracy name: Komi Permyak Test accuracy value: 40.2 - type: accuracy name: Faroese Test accuracy value: 76.1 - type: accuracy name: Sanskrit Test accuracy value: 32.5 - type: accuracy name: Livvi Test accuracy value: 62.6 - type: accuracy name: Arabic Test accuracy value: 80.9 - type: accuracy name: Wolof Test accuracy value: 30.7 - type: accuracy name: Bulgarian Test accuracy value: 92.2 - type: accuracy name: Akuntsu Test accuracy value: 32.6 - type: accuracy name: Makurap Test accuracy value: 12.3 - type: accuracy name: Kangri Test accuracy value: 44.4 - type: accuracy name: Breton Test accuracy value: 58.0 - type: accuracy name: Telugu Test accuracy value: 77.8 - type: accuracy name: Cantonese Test accuracy value: 44.9 - type: accuracy name: Old Church Slavonic Test accuracy value: 45.4 - type: accuracy name: Karelian Test accuracy value: 69.8 - type: accuracy name: Upper Sorbian Test accuracy value: 77.5 - type: accuracy name: South Levantine Arabic Test accuracy value: 66.8 - type: accuracy name: Komi Zyrian Test accuracy value: 36.1 - type: accuracy name: Irish Test accuracy value: 67.9 - type: accuracy name: Nayini Test accuracy value: 44.9 - type: accuracy name: Munduruku Test accuracy value: 19.2 - type: accuracy name: Manx Test accuracy value: 33.1 - type: accuracy name: Skolt Sami Test accuracy value: 33.0 - type: accuracy name: Afrikaans Test accuracy value: 79.6 - type: accuracy name: Old Turkish Test accuracy value: 37.1 - type: accuracy name: Tupinamba Test accuracy value: 31.4 - type: accuracy name: Belarusian Test accuracy value: 91.0 - type: accuracy name: Serbian Test accuracy value: 99.1 - type: accuracy name: Moksha Test accuracy value: 40.2 - type: accuracy name: Western Armenian Test accuracy value: 75.8 - type: accuracy name: Scottish Gaelic Test accuracy value: 57.1 - type: accuracy name: Khunsari Test accuracy value: 32.4 - type: accuracy name: Hebrew Test accuracy value: 88.5 - type: accuracy name: Uyghur Test accuracy value: 71.0 - type: accuracy name: Chukchi Test accuracy value: 29.3 ---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Serbian

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-sr")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-sr")
```
wietsedv/xlm-roberta-base-ft-udpos28-te
d10e95d30450b526d60d9c9d86d063a9f93d1019
2022-02-25T09:59:30.000Z
[ "pytorch", "xlm-roberta", "token-classification", "te", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-te
2
null
transformers
24,937
--- language: - te license: apache-2.0 library_name: transformers tags: - part-of-speech - token-classification datasets: - universal_dependencies metrics: - accuracy model-index: - name: xlm-roberta-base-ft-udpos28-te results: - task: type: token-classification name: Part-of-Speech Tagging dataset: type: universal_dependencies name: Universal Dependencies v2.8 metrics: - type: accuracy name: English Test accuracy value: 68.9 - type: accuracy name: Dutch Test accuracy value: 68.0 - type: accuracy name: German Test accuracy value: 67.0 - type: accuracy name: Italian Test accuracy value: 63.3 - type: accuracy name: French Test accuracy value: 62.1 - type: accuracy name: Spanish Test accuracy value: 63.1 - type: accuracy name: Russian Test accuracy value: 71.0 - type: accuracy name: Swedish Test accuracy value: 66.4 - type: accuracy name: Norwegian Test accuracy value: 62.1 - type: accuracy name: Danish Test accuracy value: 67.5 - type: accuracy name: Low Saxon Test accuracy value: 48.2 - type: accuracy name: Akkadian Test accuracy value: 37.4 - type: accuracy name: Armenian Test accuracy value: 72.5 - type: accuracy name: Welsh Test accuracy value: 54.5 - type: accuracy name: Old East Slavic Test accuracy value: 57.6 - type: accuracy name: Albanian Test accuracy value: 60.3 - type: accuracy name: Slovenian Test accuracy value: 58.6 - type: accuracy name: Guajajara Test accuracy value: 35.3 - type: accuracy name: Kurmanji Test accuracy value: 67.7 - type: accuracy name: Turkish Test accuracy value: 73.0 - type: accuracy name: Finnish Test accuracy value: 73.8 - type: accuracy name: Indonesian Test accuracy value: 69.0 - type: accuracy name: Ukrainian Test accuracy value: 71.3 - type: accuracy name: Polish Test accuracy value: 68.4 - type: accuracy name: Portuguese Test accuracy value: 66.3 - type: accuracy name: Kazakh Test accuracy value: 77.4 - type: accuracy name: Latin Test accuracy value: 65.1 - type: accuracy name: Old French Test accuracy value: 48.4 - type: accuracy name: Buryat Test accuracy value: 64.0 - type: accuracy name: Kaapor Test accuracy value: 33.8 - type: accuracy name: Korean Test accuracy value: 63.2 - type: accuracy name: Estonian Test accuracy value: 73.8 - type: accuracy name: Croatian Test accuracy value: 65.6 - type: accuracy name: Gothic Test accuracy value: 29.8 - type: accuracy name: Swiss German Test accuracy value: 48.0 - type: accuracy name: Assyrian Test accuracy value: 16.8 - type: accuracy name: North Sami Test accuracy value: 41.0 - type: accuracy name: Naija Test accuracy value: 38.1 - type: accuracy name: Latvian Test accuracy value: 77.6 - type: accuracy name: Chinese Test accuracy value: 62.0 - type: accuracy name: Tagalog Test accuracy value: 66.1 - type: accuracy name: Bambara Test accuracy value: 35.3 - type: accuracy name: Lithuanian Test accuracy value: 77.6 - type: accuracy name: Galician Test accuracy value: 62.9 - type: accuracy name: Vietnamese Test accuracy value: 59.5 - type: accuracy name: Greek Test accuracy value: 66.3 - type: accuracy name: Catalan Test accuracy value: 62.1 - type: accuracy name: Czech Test accuracy value: 69.1 - type: accuracy name: Erzya Test accuracy value: 50.3 - type: accuracy name: Bhojpuri Test accuracy value: 61.0 - type: accuracy name: Thai Test accuracy value: 57.3 - type: accuracy name: Marathi Test accuracy value: 79.8 - type: accuracy name: Basque Test accuracy value: 67.4 - type: accuracy name: Slovak Test accuracy value: 67.4 - type: accuracy name: Kiche Test accuracy value: 37.4 - type: accuracy name: 
Yoruba Test accuracy value: 33.5 - type: accuracy name: Warlpiri Test accuracy value: 49.0 - type: accuracy name: Tamil Test accuracy value: 89.3 - type: accuracy name: Maltese Test accuracy value: 34.9 - type: accuracy name: Ancient Greek Test accuracy value: 48.0 - type: accuracy name: Icelandic Test accuracy value: 63.5 - type: accuracy name: Mbya Guarani Test accuracy value: 35.4 - type: accuracy name: Urdu Test accuracy value: 69.8 - type: accuracy name: Romanian Test accuracy value: 62.8 - type: accuracy name: Persian Test accuracy value: 63.5 - type: accuracy name: Apurina Test accuracy value: 50.2 - type: accuracy name: Japanese Test accuracy value: 49.7 - type: accuracy name: Hungarian Test accuracy value: 74.9 - type: accuracy name: Hindi Test accuracy value: 73.3 - type: accuracy name: Classical Chinese Test accuracy value: 41.9 - type: accuracy name: Komi Permyak Test accuracy value: 50.1 - type: accuracy name: Faroese Test accuracy value: 57.0 - type: accuracy name: Sanskrit Test accuracy value: 46.1 - type: accuracy name: Livvi Test accuracy value: 63.3 - type: accuracy name: Arabic Test accuracy value: 62.7 - type: accuracy name: Wolof Test accuracy value: 40.2 - type: accuracy name: Bulgarian Test accuracy value: 67.3 - type: accuracy name: Akuntsu Test accuracy value: 43.2 - type: accuracy name: Makurap Test accuracy value: 27.4 - type: accuracy name: Kangri Test accuracy value: 51.0 - type: accuracy name: Breton Test accuracy value: 54.9 - type: accuracy name: Telugu Test accuracy value: 94.9 - type: accuracy name: Cantonese Test accuracy value: 60.4 - type: accuracy name: Old Church Slavonic Test accuracy value: 46.3 - type: accuracy name: Karelian Test accuracy value: 65.9 - type: accuracy name: Upper Sorbian Test accuracy value: 59.7 - type: accuracy name: South Levantine Arabic Test accuracy value: 61.5 - type: accuracy name: Komi Zyrian Test accuracy value: 45.2 - type: accuracy name: Irish Test accuracy value: 56.0 - type: accuracy name: Nayini Test accuracy value: 52.6 - type: accuracy name: Munduruku Test accuracy value: 36.2 - type: accuracy name: Manx Test accuracy value: 37.0 - type: accuracy name: Skolt Sami Test accuracy value: 46.7 - type: accuracy name: Afrikaans Test accuracy value: 64.3 - type: accuracy name: Old Turkish Test accuracy value: 39.8 - type: accuracy name: Tupinamba Test accuracy value: 45.1 - type: accuracy name: Belarusian Test accuracy value: 70.0 - type: accuracy name: Serbian Test accuracy value: 66.4 - type: accuracy name: Moksha Test accuracy value: 45.7 - type: accuracy name: Western Armenian Test accuracy value: 66.0 - type: accuracy name: Scottish Gaelic Test accuracy value: 52.6 - type: accuracy name: Khunsari Test accuracy value: 45.9 - type: accuracy name: Hebrew Test accuracy value: 74.0 - type: accuracy name: Uyghur Test accuracy value: 75.9 - type: accuracy name: Chukchi Test accuracy value: 40.8 ---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Telugu

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-te")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-te")
```
wietsedv/xlm-roberta-base-ft-udpos28-ug
55a0a39a4273216737196de5728dccfd380ed67e
2022-02-25T09:59:33.000Z
[ "pytorch", "xlm-roberta", "token-classification", "ug", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-ug
2
1
transformers
24,938
--- language: - ug license: apache-2.0 library_name: transformers tags: - part-of-speech - token-classification datasets: - universal_dependencies metrics: - accuracy model-index: - name: xlm-roberta-base-ft-udpos28-ug results: - task: type: token-classification name: Part-of-Speech Tagging dataset: type: universal_dependencies name: Universal Dependencies v2.8 metrics: - type: accuracy name: English Test accuracy value: 60.9 - type: accuracy name: Dutch Test accuracy value: 57.8 - type: accuracy name: German Test accuracy value: 61.0 - type: accuracy name: Italian Test accuracy value: 59.4 - type: accuracy name: French Test accuracy value: 53.9 - type: accuracy name: Spanish Test accuracy value: 55.5 - type: accuracy name: Russian Test accuracy value: 71.6 - type: accuracy name: Swedish Test accuracy value: 65.9 - type: accuracy name: Norwegian Test accuracy value: 63.0 - type: accuracy name: Danish Test accuracy value: 64.4 - type: accuracy name: Low Saxon Test accuracy value: 44.5 - type: accuracy name: Akkadian Test accuracy value: 37.0 - type: accuracy name: Armenian Test accuracy value: 77.0 - type: accuracy name: Welsh Test accuracy value: 57.1 - type: accuracy name: Old East Slavic Test accuracy value: 58.4 - type: accuracy name: Albanian Test accuracy value: 63.4 - type: accuracy name: Slovenian Test accuracy value: 58.7 - type: accuracy name: Guajajara Test accuracy value: 38.2 - type: accuracy name: Kurmanji Test accuracy value: 71.3 - type: accuracy name: Turkish Test accuracy value: 74.6 - type: accuracy name: Finnish Test accuracy value: 76.0 - type: accuracy name: Indonesian Test accuracy value: 65.5 - type: accuracy name: Ukrainian Test accuracy value: 71.6 - type: accuracy name: Polish Test accuracy value: 67.9 - type: accuracy name: Portuguese Test accuracy value: 62.4 - type: accuracy name: Kazakh Test accuracy value: 82.0 - type: accuracy name: Latin Test accuracy value: 68.3 - type: accuracy name: Old French Test accuracy value: 45.0 - type: accuracy name: Buryat Test accuracy value: 61.5 - type: accuracy name: Kaapor Test accuracy value: 29.2 - type: accuracy name: Korean Test accuracy value: 61.7 - type: accuracy name: Estonian Test accuracy value: 74.8 - type: accuracy name: Croatian Test accuracy value: 64.6 - type: accuracy name: Gothic Test accuracy value: 23.8 - type: accuracy name: Swiss German Test accuracy value: 46.9 - type: accuracy name: Assyrian Test accuracy value: 29.4 - type: accuracy name: North Sami Test accuracy value: 42.7 - type: accuracy name: Naija Test accuracy value: 39.0 - type: accuracy name: Latvian Test accuracy value: 77.2 - type: accuracy name: Chinese Test accuracy value: 57.9 - type: accuracy name: Tagalog Test accuracy value: 61.5 - type: accuracy name: Bambara Test accuracy value: 35.8 - type: accuracy name: Lithuanian Test accuracy value: 79.1 - type: accuracy name: Galician Test accuracy value: 60.3 - type: accuracy name: Vietnamese Test accuracy value: 67.9 - type: accuracy name: Greek Test accuracy value: 61.4 - type: accuracy name: Catalan Test accuracy value: 50.3 - type: accuracy name: Czech Test accuracy value: 67.9 - type: accuracy name: Erzya Test accuracy value: 49.9 - type: accuracy name: Bhojpuri Test accuracy value: 55.0 - type: accuracy name: Thai Test accuracy value: 56.2 - type: accuracy name: Marathi Test accuracy value: 81.6 - type: accuracy name: Basque Test accuracy value: 70.3 - type: accuracy name: Slovak Test accuracy value: 63.9 - type: accuracy name: Kiche Test accuracy value: 35.6 - type: accuracy name: 
Yoruba Test accuracy value: 32.9 - type: accuracy name: Warlpiri Test accuracy value: 55.5 - type: accuracy name: Tamil Test accuracy value: 73.9 - type: accuracy name: Maltese Test accuracy value: 32.3 - type: accuracy name: Ancient Greek Test accuracy value: 51.7 - type: accuracy name: Icelandic Test accuracy value: 65.8 - type: accuracy name: Mbya Guarani Test accuracy value: 34.3 - type: accuracy name: Urdu Test accuracy value: 68.7 - type: accuracy name: Romanian Test accuracy value: 65.1 - type: accuracy name: Persian Test accuracy value: 74.1 - type: accuracy name: Apurina Test accuracy value: 45.9 - type: accuracy name: Japanese Test accuracy value: 47.5 - type: accuracy name: Hungarian Test accuracy value: 62.6 - type: accuracy name: Hindi Test accuracy value: 74.2 - type: accuracy name: Classical Chinese Test accuracy value: 40.9 - type: accuracy name: Komi Permyak Test accuracy value: 49.2 - type: accuracy name: Faroese Test accuracy value: 56.4 - type: accuracy name: Sanskrit Test accuracy value: 43.1 - type: accuracy name: Livvi Test accuracy value: 64.2 - type: accuracy name: Arabic Test accuracy value: 60.9 - type: accuracy name: Wolof Test accuracy value: 35.2 - type: accuracy name: Bulgarian Test accuracy value: 68.3 - type: accuracy name: Akuntsu Test accuracy value: 47.6 - type: accuracy name: Makurap Test accuracy value: 23.3 - type: accuracy name: Kangri Test accuracy value: 51.8 - type: accuracy name: Breton Test accuracy value: 52.0 - type: accuracy name: Telugu Test accuracy value: 82.8 - type: accuracy name: Cantonese Test accuracy value: 57.4 - type: accuracy name: Old Church Slavonic Test accuracy value: 41.9 - type: accuracy name: Karelian Test accuracy value: 64.6 - type: accuracy name: Upper Sorbian Test accuracy value: 59.8 - type: accuracy name: South Levantine Arabic Test accuracy value: 58.0 - type: accuracy name: Komi Zyrian Test accuracy value: 48.8 - type: accuracy name: Irish Test accuracy value: 51.8 - type: accuracy name: Nayini Test accuracy value: 55.1 - type: accuracy name: Munduruku Test accuracy value: 41.2 - type: accuracy name: Manx Test accuracy value: 36.9 - type: accuracy name: Skolt Sami Test accuracy value: 45.6 - type: accuracy name: Afrikaans Test accuracy value: 61.8 - type: accuracy name: Old Turkish Test accuracy value: 40.7 - type: accuracy name: Tupinamba Test accuracy value: 52.6 - type: accuracy name: Belarusian Test accuracy value: 71.2 - type: accuracy name: Serbian Test accuracy value: 63.1 - type: accuracy name: Moksha Test accuracy value: 49.0 - type: accuracy name: Western Armenian Test accuracy value: 71.8 - type: accuracy name: Scottish Gaelic Test accuracy value: 48.0 - type: accuracy name: Khunsari Test accuracy value: 52.7 - type: accuracy name: Hebrew Test accuracy value: 77.1 - type: accuracy name: Uyghur Test accuracy value: 89.9 - type: accuracy name: Chukchi Test accuracy value: 40.3 ---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Uyghur

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-ug")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-ug")
```
wietsedv/xlm-roberta-base-ft-udpos28-ur
479b80d1868cf69c462b956757ebaa2a0bc78843
2022-02-25T09:59:36.000Z
[ "pytorch", "xlm-roberta", "token-classification", "ur", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-ur
2
null
transformers
24,939
--- language: - ur license: apache-2.0 library_name: transformers tags: - part-of-speech - token-classification datasets: - universal_dependencies metrics: - accuracy model-index: - name: xlm-roberta-base-ft-udpos28-ur results: - task: type: token-classification name: Part-of-Speech Tagging dataset: type: universal_dependencies name: Universal Dependencies v2.8 metrics: - type: accuracy name: English Test accuracy value: 76.9 - type: accuracy name: Dutch Test accuracy value: 74.3 - type: accuracy name: German Test accuracy value: 73.5 - type: accuracy name: Italian Test accuracy value: 71.0 - type: accuracy name: French Test accuracy value: 68.2 - type: accuracy name: Spanish Test accuracy value: 72.7 - type: accuracy name: Russian Test accuracy value: 85.9 - type: accuracy name: Swedish Test accuracy value: 80.0 - type: accuracy name: Norwegian Test accuracy value: 74.9 - type: accuracy name: Danish Test accuracy value: 77.4 - type: accuracy name: Low Saxon Test accuracy value: 46.2 - type: accuracy name: Akkadian Test accuracy value: 19.5 - type: accuracy name: Armenian Test accuracy value: 82.7 - type: accuracy name: Welsh Test accuracy value: 63.7 - type: accuracy name: Old East Slavic Test accuracy value: 69.3 - type: accuracy name: Albanian Test accuracy value: 71.8 - type: accuracy name: Slovenian Test accuracy value: 74.0 - type: accuracy name: Guajajara Test accuracy value: 19.2 - type: accuracy name: Kurmanji Test accuracy value: 75.2 - type: accuracy name: Turkish Test accuracy value: 76.7 - type: accuracy name: Finnish Test accuracy value: 80.4 - type: accuracy name: Indonesian Test accuracy value: 78.0 - type: accuracy name: Ukrainian Test accuracy value: 83.8 - type: accuracy name: Polish Test accuracy value: 83.5 - type: accuracy name: Portuguese Test accuracy value: 74.5 - type: accuracy name: Kazakh Test accuracy value: 82.6 - type: accuracy name: Latin Test accuracy value: 72.6 - type: accuracy name: Old French Test accuracy value: 43.4 - type: accuracy name: Buryat Test accuracy value: 49.7 - type: accuracy name: Kaapor Test accuracy value: 15.8 - type: accuracy name: Korean Test accuracy value: 59.0 - type: accuracy name: Estonian Test accuracy value: 81.0 - type: accuracy name: Croatian Test accuracy value: 82.0 - type: accuracy name: Gothic Test accuracy value: 5.8 - type: accuracy name: Swiss German Test accuracy value: 43.1 - type: accuracy name: Assyrian Test accuracy value: 17.2 - type: accuracy name: North Sami Test accuracy value: 22.3 - type: accuracy name: Naija Test accuracy value: 36.3 - type: accuracy name: Latvian Test accuracy value: 82.3 - type: accuracy name: Chinese Test accuracy value: 33.9 - type: accuracy name: Tagalog Test accuracy value: 78.5 - type: accuracy name: Bambara Test accuracy value: 18.7 - type: accuracy name: Lithuanian Test accuracy value: 82.9 - type: accuracy name: Galician Test accuracy value: 73.5 - type: accuracy name: Vietnamese Test accuracy value: 60.4 - type: accuracy name: Greek Test accuracy value: 68.1 - type: accuracy name: Catalan Test accuracy value: 70.9 - type: accuracy name: Czech Test accuracy value: 81.0 - type: accuracy name: Erzya Test accuracy value: 31.3 - type: accuracy name: Bhojpuri Test accuracy value: 62.1 - type: accuracy name: Thai Test accuracy value: 46.9 - type: accuracy name: Marathi Test accuracy value: 82.2 - type: accuracy name: Basque Test accuracy value: 77.8 - type: accuracy name: Slovak Test accuracy value: 80.8 - type: accuracy name: Kiche Test accuracy value: 21.2 - type: accuracy name: 
Yoruba Test accuracy value: 16.4 - type: accuracy name: Warlpiri Test accuracy value: 19.8 - type: accuracy name: Tamil Test accuracy value: 86.0 - type: accuracy name: Maltese Test accuracy value: 15.1 - type: accuracy name: Ancient Greek Test accuracy value: 56.3 - type: accuracy name: Icelandic Test accuracy value: 74.4 - type: accuracy name: Mbya Guarani Test accuracy value: 22.7 - type: accuracy name: Urdu Test accuracy value: 94.8 - type: accuracy name: Romanian Test accuracy value: 74.7 - type: accuracy name: Persian Test accuracy value: 80.6 - type: accuracy name: Apurina Test accuracy value: 21.6 - type: accuracy name: Japanese Test accuracy value: 29.6 - type: accuracy name: Hungarian Test accuracy value: 72.6 - type: accuracy name: Hindi Test accuracy value: 91.9 - type: accuracy name: Classical Chinese Test accuracy value: 16.8 - type: accuracy name: Komi Permyak Test accuracy value: 32.5 - type: accuracy name: Faroese Test accuracy value: 67.3 - type: accuracy name: Sanskrit Test accuracy value: 12.1 - type: accuracy name: Livvi Test accuracy value: 51.9 - type: accuracy name: Arabic Test accuracy value: 79.8 - type: accuracy name: Wolof Test accuracy value: 21.6 - type: accuracy name: Bulgarian Test accuracy value: 84.7 - type: accuracy name: Akuntsu Test accuracy value: 15.4 - type: accuracy name: Makurap Test accuracy value: 2.1 - type: accuracy name: Kangri Test accuracy value: 55.4 - type: accuracy name: Breton Test accuracy value: 49.5 - type: accuracy name: Telugu Test accuracy value: 85.2 - type: accuracy name: Cantonese Test accuracy value: 38.2 - type: accuracy name: Old Church Slavonic Test accuracy value: 38.1 - type: accuracy name: Karelian Test accuracy value: 61.1 - type: accuracy name: Upper Sorbian Test accuracy value: 64.6 - type: accuracy name: South Levantine Arabic Test accuracy value: 61.6 - type: accuracy name: Komi Zyrian Test accuracy value: 27.6 - type: accuracy name: Irish Test accuracy value: 62.7 - type: accuracy name: Nayini Test accuracy value: 41.0 - type: accuracy name: Munduruku Test accuracy value: 8.7 - type: accuracy name: Manx Test accuracy value: 20.1 - type: accuracy name: Skolt Sami Test accuracy value: 25.0 - type: accuracy name: Afrikaans Test accuracy value: 74.0 - type: accuracy name: Old Turkish Test accuracy value: 44.3 - type: accuracy name: Tupinamba Test accuracy value: 20.9 - type: accuracy name: Belarusian Test accuracy value: 82.6 - type: accuracy name: Serbian Test accuracy value: 82.4 - type: accuracy name: Moksha Test accuracy value: 28.5 - type: accuracy name: Western Armenian Test accuracy value: 74.1 - type: accuracy name: Scottish Gaelic Test accuracy value: 53.3 - type: accuracy name: Khunsari Test accuracy value: 43.2 - type: accuracy name: Hebrew Test accuracy value: 83.3 - type: accuracy name: Uyghur Test accuracy value: 75.8 - type: accuracy name: Chukchi Test accuracy value: 26.8 ---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Urdu

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-ur")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-ur")
```
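The card stops at loading the model; a sketch of a full tagging pass, assuming only standard `transformers` behavior (the Urdu input sentence and the use of `config.id2label` for tag names are illustrative additions, not part of the original card):

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-ur")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-ur")

# Illustrative Urdu sentence ("This is a sentence.").
inputs = tokenizer("یہ ایک جملہ ہے۔", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring tag per sub-token and map ids to the label
# names stored in the checkpoint's config.
predicted_ids = logits.argmax(dim=-1)[0].tolist()
print([model.config.id2label[i] for i in predicted_ids])
```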
wietsedv/xlm-roberta-base-ft-udpos28-wo
01d1bddea8f9faf3d9d9e2cf2bf61ff926413fa4
2022-02-25T09:59:39.000Z
[ "pytorch", "xlm-roberta", "token-classification", "wo", "dataset:universal_dependencies", "transformers", "part-of-speech", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
false
wietsedv
null
wietsedv/xlm-roberta-base-ft-udpos28-wo
2
null
transformers
24,940
--- language: - wo license: apache-2.0 library_name: transformers tags: - part-of-speech - token-classification datasets: - universal_dependencies metrics: - accuracy model-index: - name: xlm-roberta-base-ft-udpos28-wo results: - task: type: token-classification name: Part-of-Speech Tagging dataset: type: universal_dependencies name: Universal Dependencies v2.8 metrics: - type: accuracy name: English Test accuracy value: 51.4 - type: accuracy name: Dutch Test accuracy value: 52.2 - type: accuracy name: German Test accuracy value: 38.4 - type: accuracy name: Italian Test accuracy value: 51.2 - type: accuracy name: French Test accuracy value: 48.8 - type: accuracy name: Spanish Test accuracy value: 52.4 - type: accuracy name: Russian Test accuracy value: 57.3 - type: accuracy name: Swedish Test accuracy value: 49.0 - type: accuracy name: Norwegian Test accuracy value: 49.1 - type: accuracy name: Danish Test accuracy value: 52.4 - type: accuracy name: Low Saxon Test accuracy value: 34.5 - type: accuracy name: Akkadian Test accuracy value: 41.6 - type: accuracy name: Armenian Test accuracy value: 61.7 - type: accuracy name: Welsh Test accuracy value: 41.5 - type: accuracy name: Old East Slavic Test accuracy value: 48.3 - type: accuracy name: Albanian Test accuracy value: 51.8 - type: accuracy name: Slovenian Test accuracy value: 43.9 - type: accuracy name: Guajajara Test accuracy value: 32.0 - type: accuracy name: Kurmanji Test accuracy value: 46.5 - type: accuracy name: Turkish Test accuracy value: 56.7 - type: accuracy name: Finnish Test accuracy value: 58.5 - type: accuracy name: Indonesian Test accuracy value: 61.8 - type: accuracy name: Ukrainian Test accuracy value: 56.8 - type: accuracy name: Polish Test accuracy value: 55.2 - type: accuracy name: Portuguese Test accuracy value: 55.5 - type: accuracy name: Kazakh Test accuracy value: 63.6 - type: accuracy name: Latin Test accuracy value: 51.1 - type: accuracy name: Old French Test accuracy value: 33.8 - type: accuracy name: Buryat Test accuracy value: 54.2 - type: accuracy name: Kaapor Test accuracy value: 23.8 - type: accuracy name: Korean Test accuracy value: 52.5 - type: accuracy name: Estonian Test accuracy value: 60.2 - type: accuracy name: Croatian Test accuracy value: 52.4 - type: accuracy name: Gothic Test accuracy value: 23.0 - type: accuracy name: Swiss German Test accuracy value: 30.6 - type: accuracy name: Assyrian Test accuracy value: 18.8 - type: accuracy name: North Sami Test accuracy value: 42.8 - type: accuracy name: Naija Test accuracy value: 26.9 - type: accuracy name: Latvian Test accuracy value: 61.3 - type: accuracy name: Chinese Test accuracy value: 33.6 - type: accuracy name: Tagalog Test accuracy value: 62.2 - type: accuracy name: Bambara Test accuracy value: 33.8 - type: accuracy name: Lithuanian Test accuracy value: 61.0 - type: accuracy name: Galician Test accuracy value: 53.1 - type: accuracy name: Vietnamese Test accuracy value: 49.1 - type: accuracy name: Greek Test accuracy value: 46.2 - type: accuracy name: Catalan Test accuracy value: 52.9 - type: accuracy name: Czech Test accuracy value: 55.2 - type: accuracy name: Erzya Test accuracy value: 50.0 - type: accuracy name: Bhojpuri Test accuracy value: 43.1 - type: accuracy name: Thai Test accuracy value: 34.9 - type: accuracy name: Marathi Test accuracy value: 57.1 - type: accuracy name: Basque Test accuracy value: 66.6 - type: accuracy name: Slovak Test accuracy value: 58.8 - type: accuracy name: Kiche Test accuracy value: 50.1 - type: accuracy name: 
Yoruba Test accuracy value: 34.1 - type: accuracy name: Warlpiri Test accuracy value: 42.5 - type: accuracy name: Tamil Test accuracy value: 66.0 - type: accuracy name: Maltese Test accuracy value: 35.7 - type: accuracy name: Ancient Greek Test accuracy value: 39.3 - type: accuracy name: Icelandic Test accuracy value: 47.9 - type: accuracy name: Mbya Guarani Test accuracy value: 31.8 - type: accuracy name: Urdu Test accuracy value: 40.4 - type: accuracy name: Romanian Test accuracy value: 54.4 - type: accuracy name: Persian Test accuracy value: 46.2 - type: accuracy name: Apurina Test accuracy value: 58.3 - type: accuracy name: Japanese Test accuracy value: 31.0 - type: accuracy name: Hungarian Test accuracy value: 53.0 - type: accuracy name: Hindi Test accuracy value: 49.3 - type: accuracy name: Classical Chinese Test accuracy value: 24.8 - type: accuracy name: Komi Permyak Test accuracy value: 49.3 - type: accuracy name: Faroese Test accuracy value: 51.5 - type: accuracy name: Sanskrit Test accuracy value: 31.0 - type: accuracy name: Livvi Test accuracy value: 52.5 - type: accuracy name: Arabic Test accuracy value: 50.6 - type: accuracy name: Wolof Test accuracy value: 91.5 - type: accuracy name: Bulgarian Test accuracy value: 54.3 - type: accuracy name: Akuntsu Test accuracy value: 35.7 - type: accuracy name: Makurap Test accuracy value: 20.5 - type: accuracy name: Kangri Test accuracy value: 36.2 - type: accuracy name: Breton Test accuracy value: 46.9 - type: accuracy name: Telugu Test accuracy value: 63.5 - type: accuracy name: Cantonese Test accuracy value: 40.2 - type: accuracy name: Old Church Slavonic Test accuracy value: 27.7 - type: accuracy name: Karelian Test accuracy value: 55.2 - type: accuracy name: Upper Sorbian Test accuracy value: 52.5 - type: accuracy name: South Levantine Arabic Test accuracy value: 46.6 - type: accuracy name: Komi Zyrian Test accuracy value: 43.4 - type: accuracy name: Irish Test accuracy value: 44.3 - type: accuracy name: Nayini Test accuracy value: 46.2 - type: accuracy name: Munduruku Test accuracy value: 32.3 - type: accuracy name: Manx Test accuracy value: 38.2 - type: accuracy name: Skolt Sami Test accuracy value: 41.8 - type: accuracy name: Afrikaans Test accuracy value: 49.0 - type: accuracy name: Old Turkish Test accuracy value: 42.1 - type: accuracy name: Tupinamba Test accuracy value: 48.2 - type: accuracy name: Belarusian Test accuracy value: 61.1 - type: accuracy name: Serbian Test accuracy value: 52.9 - type: accuracy name: Moksha Test accuracy value: 47.3 - type: accuracy name: Western Armenian Test accuracy value: 62.9 - type: accuracy name: Scottish Gaelic Test accuracy value: 39.6 - type: accuracy name: Khunsari Test accuracy value: 36.5 - type: accuracy name: Hebrew Test accuracy value: 64.6 - type: accuracy name: Uyghur Test accuracy value: 59.7 - type: accuracy name: Chukchi Test accuracy value: 40.9 ---

# XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Wolof

This model is part of our paper called:

- Make the Best of Cross-lingual Transfer: Evidence from POS Tagging with over 100 Languages

Check the [Space](https://huggingface.co/spaces/wietsedv/xpos) for more details.

## Usage

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-wo")
model = AutoModelForTokenClassification.from_pretrained("wietsedv/xlm-roberta-base-ft-udpos28-wo")
```
moshew/mpnet-base-sst2-distilled
d4113a574371c81e75e45fab23d036c5a12403e5
2022-02-24T11:43:00.000Z
[ "pytorch", "tensorboard", "mpnet", "text-classification", "transformers" ]
text-classification
false
moshew
null
moshew/mpnet-base-sst2-distilled
2
null
transformers
24,941
{'test_accuracy': 0.9426605504587156, 'test_loss': 0.1693699210882187, 'test_runtime': 1.7713, 'test_samples_per_second': 492.29, 'test_steps_per_second': 3.952}
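The readme for this record is only a test-metrics dict, but the tags mark it as an MPNet classifier distilled on SST-2, so a plausible (unverified) usage sketch is:

```python
from transformers import pipeline

# Binary sentiment classification; the exact label names come from the
# checkpoint's config and are not documented in the record above.
clf = pipeline("text-classification", model="moshew/mpnet-base-sst2-distilled")
print(clf("A touching and beautifully shot film."))
```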
cammy/pegasus-multi_news-finetuned-weaksup-1000-pegasus
8e030f5b549216e30ede0e02d8696419e0bfae2b
2022-02-24T11:21:53.000Z
[ "pytorch", "pegasus", "text2text-generation", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
text2text-generation
false
cammy
null
cammy/pegasus-multi_news-finetuned-weaksup-1000-pegasus
2
null
transformers
24,942
--- tags: - generated_from_trainer metrics: - rouge model-index: - name: pegasus-multi_news-finetuned-weaksup-1000-pegasus results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-multi_news-finetuned-weaksup-1000-pegasus This model is a fine-tuned version of [google/pegasus-multi_news](https://huggingface.co/google/pegasus-multi_news) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.1309 - Rouge1: 23.342 - Rouge2: 8.67 - Rougel: 17.2865 - Rougelsum: 19.8228 - Gen Len: 69.79 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:-------:|:---------:|:-------:| | 2.4526 | 1.0 | 1000 | 2.1309 | 23.342 | 8.67 | 17.2865 | 19.8228 | 69.79 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.2 - Datasets 1.18.3 - Tokenizers 0.11.0
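Given the card above, a minimal summarization sketch, assuming the checkpoint is used like its google/pegasus-multi_news base; the input article and generation settings are illustrative:

```python
from transformers import pipeline

summarizer = pipeline(
    "summarization",
    model="cammy/pegasus-multi_news-finetuned-weaksup-1000-pegasus",
)

article = "..."  # replace with a news article; the reported Gen Len (~70) suggests fairly long summaries
print(summarizer(article, max_length=128, truncation=True)[0]["summary_text"])
```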
Francesco/resnet26
59cac37d71961156ca054a47c58df3d729b5b807
2022-03-01T15:02:19.000Z
[ "pytorch", "resnet", "image-classification", "transformers" ]
image-classification
false
Francesco
null
Francesco/resnet26
2
null
transformers
24,943
Entry not found
Francesco/resnet34
831c45743e5a0d56dba86b8788db11d5a0023eba
2022-03-01T15:03:23.000Z
[ "pytorch", "resnet", "image-classification", "transformers" ]
image-classification
false
Francesco
null
Francesco/resnet34
2
null
transformers
24,944
Entry not found
Francesco/resnet152
f5969c4f2ebbdb08068418deb8773cd351c8b097
2022-03-01T15:09:03.000Z
[ "pytorch", "resnet", "image-classification", "transformers" ]
image-classification
false
Francesco
null
Francesco/resnet152
2
null
transformers
24,945
Entry not found
Krystalan/mdialbart_zh
9be5e56d89a6f778584ccf9c479dd9f34c7024c0
2022-02-24T12:11:13.000Z
[ "pytorch", "mbart", "text2text-generation", "arxiv:2202.05599", "transformers", "license:cc-by-nc-sa-4.0", "autotrain_compatible" ]
text2text-generation
false
Krystalan
null
Krystalan/mdialbart_zh
2
null
transformers
24,946
--- license: cc-by-nc-sa-4.0 --- ## mDialBART: A Cross-Lingual Dialogue Summarization Model This model is introduced by [*ClidSum: A Benchmark Dataset for Cross-Lingual Dialogue Summarization*](https://arxiv.org/abs/2202.05599).
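The card above only links the ClidSum paper, so the following is a sketch under assumptions: the checkpoint loads as a standard mBART seq2seq model, and (going by the `_zh` suffix) it summarizes a dialogue into Chinese. The input format is not documented in the record.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Krystalan/mdialbart_zh")
model = AutoModelForSeq2SeqLM.from_pretrained("Krystalan/mdialbart_zh")

dialogue = "..."  # a dialogue transcript; the expected input format is an assumption
inputs = tokenizer(dialogue, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, num_beams=4, max_length=128)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```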
mvip/wav2vec2-xls-r-300m-cv7-turkish-LM
32f56246ac38c1219023a757f0f3f7eaec529580
2022-02-24T13:23:44.000Z
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
false
mvip
null
mvip/wav2vec2-xls-r-300m-cv7-turkish-LM
2
null
transformers
24,947
Entry not found
simonmesserli/distilbert-base-uncased-finetuned-emotion
30ac712d99e5842bed878eaf3f123da579747334
2022-05-10T09:33:58.000Z
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
false
simonmesserli
null
simonmesserli/distilbert-base-uncased-finetuned-emotion
2
2
transformers
24,948
Entry not found
inovex/multi2convai-logistics-tr-bert
055f9076d10ac53c2cd217d341e1d9bc075732af
2022-03-01T08:54:59.000Z
[ "pytorch", "bert", "text-classification", "tr", "transformers", "license:mit" ]
text-classification
false
inovex
null
inovex/multi2convai-logistics-tr-bert
2
null
transformers
24,949
--- tags: - text-classification widget: - text: "paketi nereye koyabilirim?" license: mit language: tr --- # Multi2ConvAI-Logistics: finetuned Bert for Turkish This model was developed in the [Multi2ConvAI](https://multi2conv.ai) project: - domain: Logistics (more details about our use cases: [en](https://multi2conv.ai/en/blog/use-cases), [de](https://multi2conv.ai/en/blog/use-cases)) - language: Turkish (tr) - model type: finetuned Bert ## How to run Requires: - Huggingface transformers ### Run with Huggingface Transformers ````python from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("inovex/multi2convai-logistics-tr-bert") model = AutoModelForSequenceClassification.from_pretrained("inovex/multi2convai-logistics-tr-bert") ```` ## Further information on Multi2ConvAI: - https://multi2conv.ai - https://github.com/inovex/multi2convai - mailto: [email protected]
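The card stops after loading the tokenizer and model; here is a hedged continuation that turns the logits into an intent label (the `id2label` mapping is assumed to be populated in the checkpoint config). The same pattern applies to the other Multi2ConvAI classifiers in this dump.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("inovex/multi2convai-logistics-tr-bert")
model = AutoModelForSequenceClassification.from_pretrained("inovex/multi2convai-logistics-tr-bert")

# Classify the widget example from the card ("where can I put the package?").
inputs = tokenizer("paketi nereye koyabilirim?", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_id = logits.argmax(dim=-1).item()
print(model.config.id2label[predicted_id])  # assumes id2label is set in the config
```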
inovex/multi2convai-quality-en-bert
f4ef20df6a9785e0496ecba0e30861daa3f54be9
2022-03-01T09:00:55.000Z
[ "pytorch", "bert", "text-classification", "en", "transformers", "license:mit" ]
text-classification
false
inovex
null
inovex/multi2convai-quality-en-bert
2
null
transformers
24,950
--- tags: - text-classification widget: - text: "Start the program" license: mit language: en --- # Multi2ConvAI-Quality: finetuned Bert for English This model was developed in the [Multi2ConvAI](https://multi2conv.ai) project: - domain: Quality (more details about our use cases: [en](https://multi2conv.ai/en/blog/use-cases), [de](https://multi2conv.ai/en/blog/use-cases)) - language: English (en) - model type: finetuned Bert ## How to run Requires: - Huggingface transformers ### Run with Huggingface Transformers ````python from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("inovex/multi2convai-quality-en-bert") model = AutoModelForSequenceClassification.from_pretrained("inovex/multi2convai-quality-en-bert") ```` ## Further information on Multi2ConvAI: - https://multi2conv.ai - https://github.com/inovex/multi2convai - mailto: [email protected]
inovex/multi2convai-quality-en-mbert
ad29d70f98960e02a03bb8341bf862c4232ec8f5
2022-03-01T09:01:15.000Z
[ "pytorch", "bert", "text-classification", "en", "transformers", "license:mit" ]
text-classification
false
inovex
null
inovex/multi2convai-quality-en-mbert
2
1
transformers
24,951
--- tags: - text-classification widget: - text: "Start the program" license: mit language: en --- # Multi2ConvAI-Quality: finetuned MBert for English This model was developed in the [Multi2ConvAI](https://multi2conv.ai) project: - domain: Quality (more details about our use cases: [en](https://multi2conv.ai/en/blog/use-cases), [de](https://multi2conv.ai/en/blog/use-cases)) - language: English (en) - model type: finetuned MBert ## How to run Requires: - Huggingface transformers ### Run with Huggingface Transformers ````python from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("inovex/multi2convai-quality-en-mbert") model = AutoModelForSequenceClassification.from_pretrained("inovex/multi2convai-quality-en-mbert") ```` ## Further information on Multi2ConvAI: - https://multi2conv.ai - https://github.com/inovex/multi2convai - mailto: [email protected]
aypan17/distilgpt2-imdb
61095ff7e762bf8b43967c91eab2704734a56583
2022-02-24T18:33:38.000Z
[ "pytorch", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-generation
false
aypan17
null
aypan17/distilgpt2-imdb
2
null
transformers
24,952
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilgpt2-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-imdb This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the [imdb](https://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews) dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2 - Datasets 1.18.3 - Tokenizers 0.11.0
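Since the card above gives no usage section, a minimal generation sketch; the prompt is illustrative, on the assumption that a GPT-2 checkpoint fine-tuned on IMDB reviews will continue review-like text:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="aypan17/distilgpt2-imdb")
out = generator("This movie was", max_length=50, num_return_sequences=1)
print(out[0]["generated_text"])
```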
anas-awadalla/bert-base-uncased-few-shot-k-16-finetuned-squad-seed-0
a5f9eeec66429de5db05df1a4c3d0bf19443d012
2022-02-24T20:23:48.000Z
[ "pytorch", "tensorboard", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-16-finetuned-squad-seed-0
2
null
transformers
24,953
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-16-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-16-finetuned-squad-seed-0 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
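None of the few-shot SQuAD cards in this dump include a usage section; the following hedged sketch applies equally to the sibling bert-base and roberta-base checkpoints below (question and context are illustrative):

```python
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="anas-awadalla/bert-base-uncased-few-shot-k-16-finetuned-squad-seed-0",
)

result = qa(
    question="What dataset was the model fine-tuned on?",
    context="This checkpoint was fine-tuned on 16 examples from the SQuAD dataset.",
)
print(result["answer"], result["score"])
```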
anas-awadalla/bert-base-uncased-few-shot-k-16-finetuned-squad-seed-4
e12708cef2a1cfbd1cebcb3f5693a7c0f137dd60
2022-02-24T20:53:59.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-16-finetuned-squad-seed-4
2
null
transformers
24,954
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-16-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-16-finetuned-squad-seed-4 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-32-finetuned-squad-seed-2
fc802159c27cd54d01a5e68f02469662e46113f7
2022-02-24T22:09:35.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-32-finetuned-squad-seed-2
2
null
transformers
24,955
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-32-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-32-finetuned-squad-seed-2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-32-finetuned-squad-seed-6
6aa7b9407579d2ed978d9a5e0c5d8c78eb23f27c
2022-02-24T22:39:42.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-32-finetuned-squad-seed-6
2
null
transformers
24,956
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-32-finetuned-squad-seed-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-32-finetuned-squad-seed-6 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-0
aa0a6b4589a041821c4a6c9a82b876c0a7f74e2c
2022-02-24T23:25:24.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-0
2
null
transformers
24,957
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-64-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-64-finetuned-squad-seed-0 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-2
07a610842ec9880625c7d37d7e0545d58e96ac49
2022-02-24T23:40:52.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-2
2
null
transformers
24,958
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-64-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-64-finetuned-squad-seed-2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-4
e7031b5508d2eca402decd77cac3edb9962e6770
2022-02-24T23:56:14.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-4
2
null
transformers
24,959
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-64-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-64-finetuned-squad-seed-4 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-6
6d7e49b82b722c2f5ec9660117e48f116edbedf2
2022-02-25T00:11:33.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-6
2
null
transformers
24,960
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-64-finetuned-squad-seed-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-64-finetuned-squad-seed-6 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-10
74603918b5da2d83af2981227cc8ae6f6a1522a3
2022-02-25T00:42:17.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-10
2
null
transformers
24,961
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-64-finetuned-squad-seed-10 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-64-finetuned-squad-seed-10 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-0
af1e97580e9ccd4c41560aef5d7791a233c541ab
2022-02-25T00:57:38.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-0
2
null
transformers
24,962
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-128-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-128-finetuned-squad-seed-0 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-2
da9ca901af2f7da78cb4c65913f8d95b55c2cffd
2022-02-25T01:13:01.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-2
2
null
transformers
24,963
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-128-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-128-finetuned-squad-seed-2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-8
86791d1abe2de1cb0151b1d81592b5660bca2e88
2022-02-25T01:56:24.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-8
2
null
transformers
24,964
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-128-finetuned-squad-seed-8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-128-finetuned-squad-seed-8 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-10
f434647626412197ab20564a7f8e97e38b4e39e4
2022-02-25T02:11:47.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-10
2
null
transformers
24,965
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-128-finetuned-squad-seed-10 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-128-finetuned-squad-seed-10 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-256-finetuned-squad-seed-0
eab865c28ffd63224585bbda80b680a7d2b9e99a
2022-02-25T02:26:29.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-256-finetuned-squad-seed-0
2
null
transformers
24,966
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-256-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-256-finetuned-squad-seed-0 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-256-finetuned-squad-seed-2
ed4b41f82caf56b8fd8e4c38c8ea488bd63e75c3
2022-02-25T02:41:14.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-256-finetuned-squad-seed-2
2
null
transformers
24,967
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-256-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-256-finetuned-squad-seed-2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-256-finetuned-squad-seed-6
dd48f992d02a2e02e76be7df40ab7cece0fd0486
2022-02-25T03:10:43.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-256-finetuned-squad-seed-6
2
null
transformers
24,968
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-256-finetuned-squad-seed-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-256-finetuned-squad-seed-6 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-512-finetuned-squad-seed-0
3149361732186ded768d22f94e595f14acbd2a5e
2022-02-25T03:55:46.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-512-finetuned-squad-seed-0
2
null
transformers
24,969
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-512-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-512-finetuned-squad-seed-0 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-512-finetuned-squad-seed-2
dd6278f660411021f1bc18dbcd06dc930eb0a06b
2022-02-25T04:11:20.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-512-finetuned-squad-seed-2
2
null
transformers
24,970
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-512-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-512-finetuned-squad-seed-2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-512-finetuned-squad-seed-6
b89bfe6770cae7ea011d58af23b59fc1957635ad
2022-02-25T04:42:31.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-512-finetuned-squad-seed-6
2
null
transformers
24,971
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-512-finetuned-squad-seed-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-512-finetuned-squad-seed-6 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-1024-finetuned-squad-seed-2
35165d59394715b9e9fd4c80a0e19938f932a597
2022-02-25T05:48:11.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-1024-finetuned-squad-seed-2
2
null
transformers
24,972
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-1024-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-1024-finetuned-squad-seed-2 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/bert-base-uncased-few-shot-k-1024-finetuned-squad-seed-10
df7f842fb686c9d80b51ea2434bcc8fbb1468622
2022-02-25T06:56:53.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/bert-base-uncased-few-shot-k-1024-finetuned-squad-seed-10
2
null
transformers
24,973
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-uncased-few-shot-k-1024-finetuned-squad-seed-10 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-few-shot-k-1024-finetuned-squad-seed-10 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-16-finetuned-squad-seed-6
3977ae66819bd62f0f92fc16f9c562822fd2e363
2022-02-25T08:04:48.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-16-finetuned-squad-seed-6
2
null
transformers
24,974
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-16-finetuned-squad-seed-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-16-finetuned-squad-seed-6 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-0
e3788a8ecdaea1ef61d5eb781f86e0b5106f0567
2022-02-25T08:54:29.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-0
2
null
transformers
24,975
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-32-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-32-finetuned-squad-seed-0 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-2
2e24fccd9828d6dd3a25ccf70441b3d7ddc9ceb1
2022-02-25T09:11:30.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-2
2
null
transformers
24,976
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-32-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-32-finetuned-squad-seed-2 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-8
584397dbb226ba9a6068b4985d67e4d494b0eb77
2022-02-25T10:02:23.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-8
2
null
transformers
24,977
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-32-finetuned-squad-seed-8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-32-finetuned-squad-seed-8 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-10
8726b1f4b248c91d8d4010e85c52ca08a2bd0cd8
2022-02-25T10:19:19.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-10
2
null
transformers
24,978
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-32-finetuned-squad-seed-10 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-32-finetuned-squad-seed-10 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-0
c43c884783d5d12a4e8c671b519645609b541562
2022-02-25T10:36:26.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-0
2
null
transformers
24,979
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-64-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-64-finetuned-squad-seed-0 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
mercelisw/electra-grc
04bbd636b9aac601b9b8e6016371b9011d56a462
2022-02-25T11:08:08.000Z
[ "pytorch", "grc", "transformers", "ELECTRA", "TensorFlow" ]
null
false
mercelisw
null
mercelisw/electra-grc
2
null
transformers
24,980
--- language: - grc tags: - ELECTRA - TensorFlow --- An ELECTRA-small model for Ancient Greek, trained on texts from Homer up until the 4th century AD.
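The card above names no downstream head, so treating the checkpoint as a plain encoder for contextual embeddings is an assumption (ELECTRA discriminators are not fill-mask models). The sample text is Iliad 1.1:

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("mercelisw/electra-grc")
model = AutoModel.from_pretrained("mercelisw/electra-grc")

text = "μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος"  # Iliad 1.1
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # (1, seq_len, hidden_size)
print(hidden.shape)
```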
anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-6
38a834f53a96630f2702753e97b08f8a7afe4b5a
2022-02-25T11:27:54.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-6
2
null
transformers
24,981
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-64-finetuned-squad-seed-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-64-finetuned-squad-seed-6 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-8
eec14d300b238b7c005a8f7aac498c5702917909
2022-02-25T11:45:04.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-8
2
null
transformers
24,982
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-64-finetuned-squad-seed-8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-64-finetuned-squad-seed-8 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-0
279d2fc217d7687bb819b6094c82ea6b159a8a3e
2022-02-25T12:17:02.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-0
2
null
transformers
24,983
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-few-shot-k-128-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-few-shot-k-128-finetuned-squad-seed-0 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.17.0 - Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-4
7af4ee5777f1d8ae0e6652758475632e8a499b8a
2022-02-25T12:51:24.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-4
2
null
transformers
24,984
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-128-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-128-finetuned-squad-seed-4

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
Davlan/xlm-roberta-base-finetuned-chichewa
f2ed5085ec39d20e1f8e7eb50a234e70d1adeff5
2022-02-25T13:09:19.000Z
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
false
Davlan
null
Davlan/xlm-roberta-base-finetuned-chichewa
2
null
transformers
24,985
---
license: apache-2.0
---
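The card above carries only license metadata, but the repository tags mark this as a fill-mask model, so a standard masked-language-modeling call should apply. This is an assumption-laden sketch; the example sentence is an English placeholder, not Chichewa.

```python
# Illustrative only: the card gives no usage details, but the tags indicate a
# fill-mask (masked language modeling) checkpoint.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="Davlan/xlm-roberta-base-finetuned-chichewa")

# XLM-RoBERTa tokenizers use "<mask>" as the mask token; the sentence below
# is a placeholder, not a real Chichewa example.
for prediction in unmasker("Hello <mask>."):
    print(prediction["token_str"], prediction["score"])
```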
Davlan/xlm-roberta-base-finetuned-somali
2181fe5b873b51be6d507944963cd31912a6818d
2022-02-25T13:51:37.000Z
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
false
Davlan
null
Davlan/xlm-roberta-base-finetuned-somali
2
null
transformers
24,986
---
license: apache-2.0
---
anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-0
a138008483ea8519496a50a6ddb4a0e3ec790822
2022-02-25T13:59:28.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-0
2
null
transformers
24,987
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-256-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-256-finetuned-squad-seed-0

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
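The training script itself is not part of these cards; as a hedged reconstruction, the hyperparameter list above maps onto `transformers.TrainingArguments` roughly as follows. Dataset loading, the k=256 few-shot subsampling, and SQuAD preprocessing are omitted because the cards do not describe them, and the `output_dir` name is an assumption.

```python
# Sketch of TrainingArguments matching the hyperparameters listed above.
# The actual training script is not included in the card; dataset loading,
# few-shot subsampling (k=256), and preprocessing are deliberately omitted.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="roberta-base-few-shot-k-256-finetuned-squad-seed-0",  # assumed
    learning_rate=3e-5,
    per_device_train_batch_size=24,
    per_device_eval_batch_size=24,
    seed=42,
    lr_scheduler_type="linear",   # linear decay after warmup
    warmup_ratio=0.1,             # lr_scheduler_warmup_ratio: 0.1
    num_train_epochs=10.0,
    adam_beta1=0.9,               # Adam settings listed in the card
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```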
Davlan/xlm-roberta-base-finetuned-xhosa
7a5b7db1b20ef3ff0f5b3ffe946489a827ecb190
2022-02-25T14:52:31.000Z
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
false
Davlan
null
Davlan/xlm-roberta-base-finetuned-xhosa
2
null
transformers
24,988
---
license: apache-2.0
---
anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-4
a80d405e279ec3755df8c54c87be0c62928197f2
2022-02-25T14:32:34.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-4
2
null
transformers
24,989
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-256-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-256-finetuned-squad-seed-4

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-6
2a4caac59286017a7ee46316ce2e8d4067096341
2022-02-25T14:49:04.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-6
2
null
transformers
24,990
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-256-finetuned-squad-seed-6
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-256-finetuned-squad-seed-6

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-10
ee91332f2929c206c67d5ec5ecccc1f59d7bbbfc
2022-02-25T15:22:07.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-10
2
null
transformers
24,991
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-256-finetuned-squad-seed-10
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-256-finetuned-squad-seed-10

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-2
2be843e598704ea18373521c472f882f263b09e1
2022-02-25T15:56:56.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-2
2
null
transformers
24,992
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-512-finetuned-squad-seed-2
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-512-finetuned-squad-seed-2

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-6
dc8fc1013f78bd31f3c70585c61286bb5757c7ab
2022-02-25T16:31:42.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-6
2
null
transformers
24,993
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-512-finetuned-squad-seed-6
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-512-finetuned-squad-seed-6

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-10
bae0d11de41064adc6559bd9a36ed6893fa40fc8
2022-02-25T17:06:27.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-10
2
null
transformers
24,994
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-512-finetuned-squad-seed-10
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-512-finetuned-squad-seed-10

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-4
4a9ec2934a4e289b428fb48e31228b6036b640d2
2022-02-25T18:03:56.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-4
2
null
transformers
24,995
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-1024-finetuned-squad-seed-4
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-1024-finetuned-squad-seed-4

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-6
9ede399c94c256230b4bb185307c5612ab924500
2022-02-25T18:23:03.000Z
[ "pytorch", "roberta", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-6
2
null
transformers
24,996
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-few-shot-k-1024-finetuned-squad-seed-6
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta-base-few-shot-k-1024-finetuned-squad-seed-6

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10.0

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
anas-awadalla/spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-10
447e99c45e6b862ffe1d6d2d99a7a1ee32733426
2022-02-25T20:28:21.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-10
2
null
transformers
24,997
---
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-10
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-10

This model is a fine-tuned version of [SpanBERT/spanbert-base-cased](https://huggingface.co/SpanBERT/spanbert-base-cased) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
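As a complement to the pipeline-based sketch earlier in this dump, here is roughly what the question-answering pipeline does under the hood, shown with raw model outputs. The question and context strings are invented placeholders, not part of the card.

```python
# Hand-rolled span extraction (illustrative): argmax over start/end logits,
# which is approximately what the QA pipeline does internally.
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

name = "anas-awadalla/spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-10"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForQuestionAnswering.from_pretrained(name)

question = "Who introduced SpanBERT?"      # placeholder strings for the example
context = "SpanBERT was introduced by researchers at Facebook AI and UW."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax())
answer_ids = inputs["input_ids"][0][start : end + 1]
print(tokenizer.decode(answer_ids))
```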
anas-awadalla/spanbert-base-cased-few-shot-k-32-finetuned-squad-seed-0
35a5236e712dbb8b22ded674d98ef2d18950fd65
2022-02-25T20:43:18.000Z
[ "pytorch", "bert", "question-answering", "dataset:squad", "transformers", "generated_from_trainer", "model-index", "autotrain_compatible" ]
question-answering
false
anas-awadalla
null
anas-awadalla/spanbert-base-cased-few-shot-k-32-finetuned-squad-seed-0
2
null
transformers
24,998
---
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: spanbert-base-cased-few-shot-k-32-finetuned-squad-seed-0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# spanbert-base-cased-few-shot-k-32-finetuned-squad-seed-0

This model is a fine-tuned version of [SpanBERT/spanbert-base-cased](https://huggingface.co/SpanBERT/spanbert-base-cased) on the squad dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 200

### Training results



### Framework versions

- Transformers 4.16.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.17.0
- Tokenizers 0.10.3
mrm8488/ViT2GPT-2-es
3e72fd3996badbe846315bbbfa0cefdabd188e0b
2022-02-25T20:37:40.000Z
[ "pytorch", "vision-encoder-decoder", "es", "transformers", "Vit2gpt", "captioning" ]
null
false
mrm8488
null
mrm8488/ViT2GPT-2-es
2
null
transformers
24,999
---
language:
- es
tags:
- Vit2gpt
- captioning
---

# Spanish ViT to GPT-2

### WIP
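The card is marked WIP and gives no usage instructions. Assuming the repository follows the standard `VisionEncoderDecoderModel` layout implied by its tags (ViT encoder, GPT-2 decoder), a captioning call might look like the sketch below; whether the feature extractor and tokenizer are bundled with this checkpoint is an assumption, and the image path is a placeholder.

```python
# Speculative usage sketch for a WIP card: assumes the standard
# VisionEncoderDecoder (ViT encoder + GPT-2 decoder) layout and that the
# feature extractor and tokenizer ship with the repository.
import torch
from PIL import Image
from transformers import AutoTokenizer, VisionEncoderDecoderModel, ViTFeatureExtractor

name = "mrm8488/ViT2GPT-2-es"
model = VisionEncoderDecoderModel.from_pretrained(name)
feature_extractor = ViTFeatureExtractor.from_pretrained(name)  # assumed bundled
tokenizer = AutoTokenizer.from_pretrained(name)                # assumed bundled

image = Image.open("photo.jpg").convert("RGB")  # placeholder local image path
pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values

with torch.no_grad():
    output_ids = model.generate(pixel_values, max_length=32, num_beams=4)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```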