modelId | sha | lastModified | tags | pipeline_tag | private | author | config | id | downloads | likes | library_name | __index_level_0__ | readme |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
TheLongSentance/t5_mimic_final_chkpnt20000 | be7e23ba010f58821565c2f0f242d9d47b9f5995 | 2021-09-16T08:14:05.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_final_chkpnt20000 | 2 | null | transformers | 23,500 | Entry not found |
TheLongSentance/t5_mimic_final_chkpnt225000 | ea507a51fcd7d9879f757af224ddad6a52d1c993 | 2021-09-16T10:12:27.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_final_chkpnt225000 | 2 | null | transformers | 23,501 | Entry not found |
TheLongSentance/t5_mimic_final_chkpnt75000 | db4a2fde5525a5d54674321f57e018dcab3be504 | 2021-09-16T08:42:21.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_final_chkpnt75000 | 2 | null | transformers | 23,502 | Entry not found |
TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_1_nbn_lr3e4c_chkpnt20000 | 89c50451dec60b448fd5201da1d86f5588cb6a33 | 2021-09-15T20:03:54.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_1_nbn_lr3e4c_chkpnt20000 | 2 | null | transformers | 23,503 | Entry not found |
TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_chkpnt5000 | beb05df6ab0de1d8fcb54985a71b0fd1f4caf756 | 2021-09-15T18:14:45.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_chkpnt5000 | 2 | null | transformers | 23,504 | Entry not found |
Thejas/DialoGPT-small-Stewei | d5a415ecb189006a4ead753a50f078674c11f69e | 2021-11-04T05:19:02.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Thejas | null | Thejas/DialoGPT-small-Stewei | 2 | null | transformers | 23,505 | ---
tags:
- conversational
---
# Stewie DialoGPT Model |
TingChenChang/bert-base-chinese-finetuned-squad-colab | 31389a825ea6569e931615d9b598daeb25593af7 | 2021-09-09T01:35:35.000Z | [
"pytorch",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | TingChenChang | null | TingChenChang/bert-base-chinese-finetuned-squad-colab | 2 | null | transformers | 23,506 | Entry not found |
TingChenChang/bert-multi-cased-finetuned-xquadv1-finetuned-squad-colab | ee94a3f363e55f73114404c5fc05f7897a340899 | 2021-09-13T04:57:07.000Z | [
"pytorch",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | TingChenChang | null | TingChenChang/bert-multi-cased-finetuned-xquadv1-finetuned-squad-colab | 2 | null | transformers | 23,507 | Entry not found |
Titantoe/IceBERT-finetuned-ner | b36bfdb88dea2fcaad0472af9c639557d51e6f1a | 2021-10-04T22:31:18.000Z | [
"pytorch",
"tensorboard",
"roberta",
"token-classification",
"dataset:mim_gold_ner",
"transformers",
"generated_from_trainer",
"license:gpl-3.0",
"model-index",
"autotrain_compatible"
] | token-classification | false | Titantoe | null | Titantoe/IceBERT-finetuned-ner | 2 | null | transformers | 23,508 | ---
license: gpl-3.0
tags:
- generated_from_trainer
datasets:
- mim_gold_ner
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: IceBERT-finetuned-ner
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: mim_gold_ner
type: mim_gold_ner
args: mim-gold-ner
metrics:
- name: Precision
type: precision
value: 0.8920083733530353
- name: Recall
type: recall
value: 0.8655753375552635
- name: F1
type: f1
value: 0.8785930867192238
- name: Accuracy
type: accuracy
value: 0.9855436530476731
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# IceBERT-finetuned-ner
This model is a fine-tuned version of [vesteinn/IceBERT](https://huggingface.co/vesteinn/IceBERT) on the mim_gold_ner dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0772
- Precision: 0.8920
- Recall: 0.8656
- F1: 0.8786
- Accuracy: 0.9855
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
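For readers who want to set up a comparable run, the list above corresponds roughly to the following `TrainingArguments`; this is a sketch reconstructed from this card, not the authors' actual training script:

```python
from transformers import TrainingArguments

# Hypothetical reconstruction of the hyperparameters listed above.
# Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the Trainer default optimizer.
training_args = TrainingArguments(
    output_dir="IceBERT-finetuned-ner",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3,
)
```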
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0519 | 1.0 | 2904 | 0.0731 | 0.8700 | 0.8564 | 0.8631 | 0.9832 |
| 0.026 | 2.0 | 5808 | 0.0749 | 0.8771 | 0.8540 | 0.8654 | 0.9840 |
| 0.0159 | 3.0 | 8712 | 0.0772 | 0.8920 | 0.8656 | 0.8786 | 0.9855 |
### Framework versions
- Transformers 4.11.2
- Pytorch 1.9.0+cu102
- Datasets 1.12.1
- Tokenizers 0.10.3
|
Tito/T5small_model2_learning_rate_2e-4-finetuned-en-to-de | f30caeb57d6755a952c175d7d351065a46bebb4c | 2021-12-06T23:39:50.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Tito | null | Tito/T5small_model2_learning_rate_2e-4-finetuned-en-to-de | 2 | null | transformers | 23,509 | Entry not found |
Toadally/DialoGPT-small-david_mast | 512c8a7b2915395df83ef8d51cada23a7bcfd384 | 2022-02-02T14:50:44.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Toadally | null | Toadally/DialoGPT-small-david_mast | 2 | null | transformers | 23,510 | ---
tags:
- conversational
---
# Mast DialoGPT Model |
Tofu05/DialoGPT-large-boon2 | d98b9002b9f3a14fb8cabe29b4dbd09ea132b562 | 2022-01-30T11:45:16.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Tofu05 | null | Tofu05/DialoGPT-large-boon2 | 2 | null | transformers | 23,511 | ---
tags:
- conversational
---
# Boon 2 DialoGPT Model |
Tr1ex/DialoGPT-small-rick | 089cf892d5dc23195c8a1974add76638a61fd670 | 2022-01-08T11:38:39.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Tr1ex | null | Tr1ex/DialoGPT-small-rick | 2 | null | transformers | 23,512 | ---
tags:
- conversational
---
# Rick DialoGPT Model |
Transabrar/bert-base-uncased-finetuned-bertbero | 9429e26c7ad302b8c2209a43f2d67dc6ec62da78 | 2021-10-19T21:59:42.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Transabrar | null | Transabrar/bert-base-uncased-finetuned-bertbero | 2 | null | transformers | 23,513 | Entry not found |
Transabrar/roberta-large-finetuned-abrar | ad33e6fb37619918d6f66d82a8c14ce52017275e | 2021-10-10T20:23:33.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Transabrar | null | Transabrar/roberta-large-finetuned-abrar | 2 | null | transformers | 23,514 | Entry not found |
TrimPeachu/Deadpool | b25f699c080b59c8534b9339fc7f0254d964ce08 | 2021-08-29T06:58:49.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | TrimPeachu | null | TrimPeachu/Deadpool | 2 | null | transformers | 23,515 | ---
tags:
- conversational
---
# Deadpool DialoGPT Model |
TuhinColumbia/QAGenmodelBARTELI51 | 52f89776b81aa2d4c79104d8b07ecf620ce99fdd | 2021-09-29T19:02:06.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | TuhinColumbia | null | TuhinColumbia/QAGenmodelBARTELI51 | 2 | null | transformers | 23,516 | Entry not found |
TuhinColumbia/QAGenmodelBARTELI5CC | fe76789730515b341d4b44c1dfcd7cbe724aca26 | 2021-10-10T05:57:11.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | TuhinColumbia | null | TuhinColumbia/QAGenmodelBARTELI5CC | 2 | null | transformers | 23,517 | Entry not found |
TuhinColumbia/portugesepoetrymany | 80e9bd5d6a6f7a1045aabd96c0cbc5150383d9f4 | 2021-09-03T22:24:38.000Z | [
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | TuhinColumbia | null | TuhinColumbia/portugesepoetrymany | 2 | null | transformers | 23,518 | Entry not found |
TurkuNLP/wikibert-base-bg-cased | 10536da67d7846cb0d83ea2f0ec5549b9fc36dbe | 2020-05-24T19:58:50.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-bg-cased | 2 | null | transformers | 23,519 | Entry not found |
TurkuNLP/wikibert-base-ca-cased | 7b258d41ab6e101c68d54cc1a6ca5ec11f226cc0 | 2020-05-24T19:58:56.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-ca-cased | 2 | null | transformers | 23,520 | Entry not found |
TurkuNLP/wikibert-base-da-cased | 32af50bcd8c11f419064eb1a5d0524584c75b727 | 2020-05-24T19:59:06.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-da-cased | 2 | null | transformers | 23,521 | Entry not found |
TurkuNLP/wikibert-base-de-cased | 263fde37923fb96fe2197dbb43bd0dbeff276ba9 | 2020-05-24T19:59:14.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-de-cased | 2 | null | transformers | 23,522 | Entry not found |
TurkuNLP/wikibert-base-es-cased | 26e915842f1ccdc17dbdf2a01f51312f079dfdab | 2020-05-24T19:59:29.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-es-cased | 2 | null | transformers | 23,523 | Entry not found |
TurkuNLP/wikibert-base-fi-cased | a72b9e7e8ab0848d0da82223f4bbfb1ccede41d6 | 2020-05-24T19:59:52.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-fi-cased | 2 | null | transformers | 23,524 | Entry not found |
TurkuNLP/wikibert-base-ga-cased | e03f5e8028baf49577e8f573a78dd320259dc0a8 | 2020-05-24T20:00:02.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-ga-cased | 2 | null | transformers | 23,525 | Entry not found |
TurkuNLP/wikibert-base-hu-cased | 00f7e02d07acc39d2aeea75c3a91e31468c1c9af | 2020-05-24T20:00:28.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-hu-cased | 2 | null | transformers | 23,526 | Entry not found |
TurkuNLP/wikibert-base-nl-cased | 79d84b5e75a05bf2c96aeb7cf72b55fa33739736 | 2020-05-24T20:01:07.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-nl-cased | 2 | null | transformers | 23,527 | Entry not found |
TurkuNLP/wikibert-base-no-cased | 7093fe72daf1810f6fec38e05a7f7623abe47aac | 2020-05-24T20:01:12.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-no-cased | 2 | null | transformers | 23,528 | Entry not found |
TurkuNLP/wikibert-base-pt-cased | 89df590f3002b8dd00fcdb1ad0be74dd2b7b7b7f | 2020-05-24T20:01:22.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-pt-cased | 2 | null | transformers | 23,529 | Entry not found |
TurkuNLP/wikibert-base-ro-cased | f63b076f4c8161dfcbc952aad3806a95992a17ba | 2020-05-24T20:01:27.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-ro-cased | 2 | null | transformers | 23,530 | Entry not found |
TurkuNLP/wikibert-base-sl-cased | 9cd5de2179f5c4e00528327dd7f59c40eaaefd72 | 2020-05-24T20:01:43.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-sl-cased | 2 | null | transformers | 23,531 | Entry not found |
TurkuNLP/wikibert-base-sr-cased | 875e924523a257e74914af8bebf7690ff3d72bd1 | 2020-05-24T20:01:48.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-sr-cased | 2 | null | transformers | 23,532 | Entry not found |
TurkuNLP/wikibert-base-tr-cased | d44ff88f0ef8a54f9e1eee694e6dc6fb6bdda2e1 | 2020-05-24T20:02:06.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-tr-cased | 2 | null | transformers | 23,533 | Entry not found |
TurkuNLP/wikibert-base-uk-cased | 3bea2ac81dd4716145cd5b0522550a01633fe62c | 2020-05-24T20:02:13.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-uk-cased | 2 | null | transformers | 23,534 | Entry not found |
UWB-AIR/Czert-B-base-cased-long-zero-shot | e9af7f6ce54ffbef9ca4e56352c41c4a463bb6e3 | 2022-05-03T13:49:35.000Z | [
"pytorch",
"longformer",
"feature-extraction",
"arxiv:2103.13031",
"transformers",
"cs",
"fill-mask"
] | feature-extraction | false | UWB-AIR | null | UWB-AIR/Czert-B-base-cased-long-zero-shot | 2 | null | transformers | 23,535 | ---
tags:
- cs
- fill-mask
---
# CZERT
This repository contains the trained Czert-B-base-cased-long-zero-shot model for the paper [Czert – Czech BERT-like Model for Language Representation](https://arxiv.org/abs/2103.13031).
For more information, see the paper.
This is the long version of Czert-B-base-cased, created without any fine-tuning on long documents. Positional embeddings were created by simply repeating the positional embeddings of the original Czert-B model. For tokenization, please use BertTokenizer; the model cannot be used with AutoTokenizer.
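A minimal loading sketch following the note above; the checkpoint name comes from this repository, while the use of `AutoModel` (which resolves to the Longformer architecture this card is tagged with) is an assumption rather than an official snippet:

```python
from transformers import BertTokenizer, AutoModel

# Use BertTokenizer explicitly, as noted above; AutoTokenizer cannot be used.
tokenizer = BertTokenizer.from_pretrained("UWB-AIR/Czert-B-base-cased-long-zero-shot")
model = AutoModel.from_pretrained("UWB-AIR/Czert-B-base-cased-long-zero-shot")

inputs = tokenizer("Ukázková česká věta.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
```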
## Available Models
You can download **MLM & NSP only** pretrained models
~~[CZERT-A-v1](https://air.kiv.zcu.cz/public/CZERT-A-czert-albert-base-uncased.zip)
[CZERT-B-v1](https://air.kiv.zcu.cz/public/CZERT-B-czert-bert-base-cased.zip)~~
After some additional experiments, we found that the tokenizer config had been exported incorrectly. In Czert-B-v1, the tokenizer parameter "do_lower_case" was wrongly set to true; in Czert-A-v1, the parameter "strip_accents" was incorrectly set to true.
Both mistakes are fixed in v2.
[CZERT-A-v2](https://air.kiv.zcu.cz/public/CZERT-A-v2-czert-albert-base-uncased.zip)
[CZERT-B-v2](https://air.kiv.zcu.cz/public/CZERT-B-v2-czert-bert-base-cased.zip)
or choose one of the **Finetuned Models**:
| | Models |
| - | - |
| Sentiment Classification<br> (Facebook or CSFD) | [CZERT-A-sentiment-FB](https://air.kiv.zcu.cz/public/CZERT-A_fb.zip) <br> [CZERT-B-sentiment-FB](https://air.kiv.zcu.cz/public/CZERT-B_fb.zip) <br> [CZERT-A-sentiment-CSFD](https://air.kiv.zcu.cz/public/CZERT-A_csfd.zip) <br> [CZERT-B-sentiment-CSFD](https://air.kiv.zcu.cz/public/CZERT-B_csfd.zip) |
| Semantic Text Similarity <br> (Czech News Agency) | [CZERT-A-sts-CNA](https://air.kiv.zcu.cz/public/CZERT-A-sts-CNA.zip) <br> [CZERT-B-sts-CNA](https://air.kiv.zcu.cz/public/CZERT-B-sts-CNA.zip) |
| Named Entity Recognition | [CZERT-A-ner-CNEC](https://air.kiv.zcu.cz/public/CZERT-A-ner-CNEC-cased.zip) <br> [CZERT-B-ner-CNEC](https://air.kiv.zcu.cz/public/CZERT-B-ner-CNEC-cased.zip) <br>[PAV-ner-CNEC](https://air.kiv.zcu.cz/public/PAV-ner-CNEC-cased.zip) <br> [CZERT-A-ner-BSNLP](https://air.kiv.zcu.cz/public/CZERT-A-ner-BSNLP-cased.zip)<br>[CZERT-B-ner-BSNLP](https://air.kiv.zcu.cz/public/CZERT-B-ner-BSNLP-cased.zip) <br>[PAV-ner-BSNLP](https://air.kiv.zcu.cz/public/PAV-ner-BSNLP-cased.zip) |
| Morphological Tagging<br> | [CZERT-A-morphtag-126k](https://air.kiv.zcu.cz/public/CZERT-A-morphtag-126k-cased.zip)<br>[CZERT-B-morphtag-126k](https://air.kiv.zcu.cz/public/CZERT-B-morphtag-126k-cased.zip) |
| Semantic Role Labelling |[CZERT-A-srl](https://air.kiv.zcu.cz/public/CZERT-A-srl-cased.zip)<br> [CZERT-B-srl](https://air.kiv.zcu.cz/public/CZERT-B-srl-cased.zip) |
## How to Use CZERT?
### Sentence Level Tasks
We evaluate our model on two sentence level tasks:
* Sentiment Classification,
* Semantic Text Similarity.
<!-- tokenizer = BertTokenizerFast.from_pretrained(CZERT_MODEL_PATH, strip_accents=False)
model = TFAlbertForSequenceClassification.from_pretrained(CZERT_MODEL_PATH, num_labels=1)
or
self.tokenizer = BertTokenizerFast.from_pretrained(CZERT_MODEL_PATH, strip_accents=False)
self.model_encoder = AutoModelForSequenceClassification.from_pretrained(CZERT_MODEL_PATH, from_tf=True)
-->
### Document Level Tasks
We evaluate our model on one document level task
* Multi-label Document Classification.
### Token Level Tasks
We evaluate our model on three token level tasks:
* Named Entity Recognition,
* Morphological Tagging,
* Semantic Role Labelling.
## Downstream Tasks Fine-tuning Results
### Sentiment Classification
| | mBERT | SlavicBERT | ALBERT-r | Czert-A | Czert-B |
|:----:|:------------------------:|:------------------------:|:------------------------:|:-----------------------:|:--------------------------------:|
| FB | 71.72 ± 0.91 | 73.87 ± 0.50 | 59.50 ± 0.47 | 72.47 ± 0.72 | **76.55** ± **0.14** |
| CSFD | 82.80 ± 0.14 | 82.51 ± 0.14 | 75.40 ± 0.18 | 79.58 ± 0.46 | **84.79** ± **0.26** |
Average F1 results for the Sentiment Classification task. For more information, see [the paper](https://arxiv.org/abs/2103.13031).
### Semantic Text Similarity
| | **mBERT** | **Pavlov** | **Albert-random** | **Czert-A** | **Czert-B** |
|:-------------|:--------------:|:--------------:|:-----------------:|:--------------:|:----------------------:|
| STA-CNA | 83.335 ± 0.063 | 83.593 ± 0.050 | 43.184 ± 0.125 | 82.942 ± 0.106 | **84.345** ± **0.028** |
| STS-SVOB-img | 79.367 ± 0.486 | 79.900 ± 0.810 | 15.739 ± 2.992 | 79.444 ± 0.338 | **83.744** ± **0.395** |
| STS-SVOB-hl | 78.833 ± 0.296 | 76.996 ± 0.305 | 33.949 ± 1.807 | 75.089 ± 0.806 | **79.827 ± 0.469** |
Comparison of Pearson correlation achieved using pre-trained CZERT-A, CZERT-B, mBERT, Pavlov and randomly initialised Albert on semantic text similarity. For more information see [the paper](https://arxiv.org/abs/2103.13031).
### Multi-label Document Classification
| | mBERT | SlavicBERT | ALBERT-r | Czert-A | Czert-B |
|:-----:|:------------:|:------------:|:------------:|:------------:|:-------------------:|
| AUROC | 97.62 ± 0.08 | 97.80 ± 0.06 | 94.35 ± 0.13 | 97.49 ± 0.07 | **98.00** ± **0.04** |
| F1 | 83.04 ± 0.16 | 84.08 ± 0.14 | 72.44 ± 0.22 | 82.27 ± 0.17 | **85.06** ± **0.11** |
Comparison of F1 and AUROC score achieved using pre-trained CZERT-A, CZERT-B, mBERT, Pavlov and randomly initialised Albert on multi-label document classification. For more information see [the paper](https://arxiv.org/abs/2103.13031).
### Morphological Tagging
| | mBERT | Pavlov | Albert-random | Czert-A | Czert-B |
|:-----------------------|:---------------|:---------------|:---------------|:---------------|:---------------|
| Universal Dependencies | 99.176 ± 0.006 | 99.211 ± 0.008 | 96.590 ± 0.096 | 98.713 ± 0.008 | **99.300 ± 0.009** |
Comparison of F1 score achieved using pre-trained CZERT-A, CZERT-B, mBERT, Pavlov and randomly initialised Albert on morphological tagging task. For more information see [the paper](https://arxiv.org/abs/2103.13031).
### Semantic Role Labelling
<div id="tab:SRL">
| | mBERT | Pavlov | Albert-random | Czert-A | Czert-B | dep-based | gold-dep |
|:------:|:----------:|:----------:|:-------------:|:----------:|:----------:|:---------:|:--------:|
| span | 78.547 ± 0.110 | 79.333 ± 0.080 | 51.365 ± 0.423 | 72.254 ± 0.172 | **81.861 ± 0.102** | \- | \- |
| syntax | 90.226 ± 0.224 | 90.492 ± 0.040 | 80.747 ± 0.131 | 80.319 ± 0.054 | **91.462 ± 0.062** | 85.19 | 89.52 |
SRL results – dep columns are evaluated with labelled F1 from the CoNLL 2009 evaluation script; the other columns are evaluated with the same span F1 score used for NER evaluation. For more information see [the paper](https://arxiv.org/abs/2103.13031).
</div>
### Named Entity Recognition
| | mBERT | Pavlov | Albert-random | Czert-A | Czert-B |
|:-----------|:---------------|:---------------|:---------------|:---------------|:---------------|
| CNEC | **86.225 ± 0.208** | **86.565 ± 0.198** | 34.635 ± 0.343 | 72.945 ± 0.227 | 86.274 ± 0.116 |
| BSNLP 2019 | 84.006 ± 1.248 | **86.699 ± 0.370** | 19.773 ± 0.938 | 48.859 ± 0.605 | **86.729 ± 0.344** |
Comparison of F1 score achieved using pre-trained CZERT-A, CZERT-B, mBERT, Pavlov and randomly initialised Albert on the named entity recognition task. For more information see [the paper](https://arxiv.org/abs/2103.13031).
## Licence
This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. http://creativecommons.org/licenses/by-nc-sa/4.0/
## How should I cite CZERT?
For now, please cite [the Arxiv paper](https://arxiv.org/abs/2103.13031):
```
@article{sido2021czert,
title={Czert -- Czech BERT-like Model for Language Representation},
author={Jakub Sido and Ondřej Pražák and Pavel Přibáň and Jan Pašek and Michal Seják and Miloslav Konopík},
year={2021},
eprint={2103.13031},
archivePrefix={arXiv},
primaryClass={cs.CL},
journal={arXiv preprint arXiv:2103.13031},
}
```
|
Ulto/pythonCoPilot2 | e9dec1104cfea71b976247a2a7ff80b4ed7c15aa | 2021-11-22T00:24:53.000Z | [
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"model-index"
] | text-generation | false | Ulto | null | Ulto/pythonCoPilot2 | 2 | null | transformers | 23,536 | ---
tags:
- generated_from_trainer
model-index:
- name: pythonCoPilot2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pythonCoPilot2
This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 4.0479
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 427 | 4.3782 |
| 4.6698 | 2.0 | 854 | 4.0718 |
| 3.3953 | 3.0 | 1281 | 4.0479 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
|
Unbabel/XLM-R-11L | d80859986f9e9bb4a7ca091da303a1df354c4bf3 | 2022-01-05T19:55:41.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | Unbabel | null | Unbabel/XLM-R-11L | 2 | null | transformers | 23,537 | Entry not found |
Unbabel/XLM-R-22L | 7995ea43cf7b7757efec6b0e4df71448803fae3c | 2022-01-05T21:22:06.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | Unbabel | null | Unbabel/XLM-R-22L | 2 | null | transformers | 23,538 | Entry not found |
Unbabel/XLM-R-9L | dbf1c2ad290a0002581aa9c5e5957c6e0f0ed09e | 2022-01-05T19:42:43.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | Unbabel | null | Unbabel/XLM-R-9L | 2 | null | transformers | 23,539 | Entry not found |
Username1/Wenger | 96a1c522ff8433efab35fb957bc7c80f70db61c3 | 2021-09-11T18:58:31.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Username1 | null | Username1/Wenger | 2 | null | transformers | 23,540 | ---
tags:
- conversational
---
# Wenger |
VaibhS/quantized_model | db170b6cf1423f79d57e535102f5ba87ebbef30a | 2022-01-04T20:36:45.000Z | [
"pytorch",
"pegasus",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | VaibhS | null | VaibhS/quantized_model | 2 | null | transformers | 23,541 | Entry not found |
VariableZee/DialoGPT-small-ivylia03 | 651bb0b28175de763ae3c7746b05b88b29621c12 | 2021-10-27T08:50:29.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | VariableZee | null | VariableZee/DialoGPT-small-ivylia03 | 2 | null | transformers | 23,542 | ---
tags:
- conversational
---
|
Vasanth/en-ta-translator | 345afb79465bd5e4ddd0177c1659bece2a36b088 | 2022-02-18T03:50:38.000Z | [
"pytorch",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Vasanth | null | Vasanth/en-ta-translator | 2 | null | transformers | 23,543 | Entry not found |
Vasanth/multi-qa-MiniLM-L6-cos-v1-qa-squad2-retriever | af3d1c02f3be0e62b64cdf10846a012ab575c612 | 2022-02-09T00:44:30.000Z | [
"pytorch",
"bert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | Vasanth | null | Vasanth/multi-qa-MiniLM-L6-cos-v1-qa-squad2-retriever | 2 | null | sentence-transformers | 23,544 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# Vasanth/multi-qa-MiniLM-L6-cos-v1-qa-squad2-retriever
This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('Vasanth/multi-qa-MiniLM-L6-cos-v1-qa-squad2-retriever')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('Vasanth/multi-qa-MiniLM-L6-cos-v1-qa-squad2-retriever')
model = AutoModel.from_pretrained('Vasanth/multi-qa-MiniLM-L6-cos-v1-qa-squad2-retriever')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Vasanth/multi-qa-MiniLM-L6-cos-v1-qa-squad2-retriever)
## Training
The model was trained with the parameters:
**DataLoader**:
`sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 8144 with parameters:
```
{'batch_size': 16}
```
**Loss**:
`sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```
Parameters of the fit()-Method:
```
{
    "epochs": 3,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'transformers.optimization.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 2443,
    "weight_decay": 0.01
}
```
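Putting the pieces above together, a minimal training sketch might look as follows; the base checkpoint (`multi-qa-MiniLM-L6-cos-v1`) is inferred from the model name, and the example pairs are placeholders, not the actual SQuAD2-derived training data:

```python
from sentence_transformers import SentenceTransformer, InputExample, losses
from sentence_transformers.datasets import NoDuplicatesDataLoader

# Placeholder (question, passage) pairs; the real training data is not included here.
train_examples = [
    InputExample(texts=["What is the capital of France?", "Paris is the capital of France."]),
    InputExample(texts=["Who wrote Hamlet?", "Hamlet is a tragedy written by William Shakespeare."]),
]

model = SentenceTransformer("multi-qa-MiniLM-L6-cos-v1")
train_dataloader = NoDuplicatesDataLoader(train_examples, batch_size=16)
train_loss = losses.MultipleNegativesRankingLoss(model, scale=20.0)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=3,
    warmup_steps=2443,
    optimizer_params={"lr": 2e-05},
    weight_decay=0.01,
)
```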
## Full Model Architecture
```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
VishalArun/DialoGPT-medium-harrypotter | 74d228dfdc335d84ed3d5b54b6cdfc07a4f98120 | 2021-08-29T10:12:39.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | VishalArun | null | VishalArun/DialoGPT-medium-harrypotter | 2 | null | transformers | 23,545 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
Vitafeu/DialoGPT-medium-ricksanchez | 0d88f26929ee040765e97220f4f40a0530b8f3be | 2021-09-16T08:59:52.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Vitafeu | null | Vitafeu/DialoGPT-medium-ricksanchez | 2 | null | transformers | 23,546 | ---
tags:
- conversational
---
# Rick Sanchez DialoGPT Model |
VoVanPhuc/Phobert2Roberta | 4d8281862be8b9b38a2ee650b79e7fb64020fd59 | 2021-08-26T07:38:48.000Z | [
"pytorch",
"encoder-decoder",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | VoVanPhuc | null | VoVanPhuc/Phobert2Roberta | 2 | null | transformers | 23,547 | Entry not found |
VoVanPhuc/Roberta2Phobert | 1c1e8055c300c844a74b1c3f710038bf64a320f6 | 2021-08-26T07:37:35.000Z | [
"pytorch",
"encoder-decoder",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | VoVanPhuc | null | VoVanPhuc/Roberta2Phobert | 2 | null | transformers | 23,548 | Entry not found |
Wikidepia/IndoConvBERT-base | 37a849691847717f991f8c0189c40f25b11d0e53 | 2021-04-02T07:22:25.000Z | [
"pytorch",
"tf",
"convbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | Wikidepia | null | Wikidepia/IndoConvBERT-base | 2 | null | transformers | 23,549 | ---
inference: false
language: id
---
# IndoConvBERT Base Model
IndoConvBERT is a ConvBERT model pretrained on Indo4B.
## Pretraining details
We follow a different training procedure: instead of using a two-phase approach that pre-trains the model for 90% of the steps with a sequence length of 128 and the remaining 10% with a sequence length of 512, we pre-train the model with a sequence length of 512 for 1M steps on a v3-8 TPU.
The current version of the model is trained on Indo4B and a small Twitter dump.
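The card does not include a usage snippet; under the assumption that the checkpoint loads through the standard `transformers` auto classes, a minimal sketch would be:

```python
from transformers import AutoTokenizer, AutoModel

# Assumption: standard ConvBERT loading via the auto classes; not an official example.
tokenizer = AutoTokenizer.from_pretrained("Wikidepia/IndoConvBERT-base")
model = AutoModel.from_pretrained("Wikidepia/IndoConvBERT-base")

inputs = tokenizer("Ibu kota Indonesia adalah Jakarta.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch, seq_len, hidden_size)
```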
## Acknowledgement
Big thanks to TFRC (TensorFlow Research Cloud) for providing free TPU.
|
WikinewsSum/bart-large-cnn-multi-en-wiki-news | 2a170805a336bb1e802a722ed9bfeb0af2d9b6e6 | 2020-07-01T08:31:39.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | WikinewsSum | null | WikinewsSum/bart-large-cnn-multi-en-wiki-news | 2 | null | transformers | 23,550 | Entry not found |
WikinewsSum/bart-large-multi-de-wiki-news | bb515e241a24534c26db001f5aa1792c92b80141 | 2020-07-01T08:27:21.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | WikinewsSum | null | WikinewsSum/bart-large-multi-de-wiki-news | 2 | null | transformers | 23,551 | Entry not found |
WikinewsSum/bart-large-multi-en-wiki-news | 8faa3b18f415790ddd11575c49b0a053576069d8 | 2020-07-01T08:33:12.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | WikinewsSum | null | WikinewsSum/bart-large-multi-en-wiki-news | 2 | null | transformers | 23,552 | Entry not found |
WikinewsSum/t5-base-multi-combine-wiki-news | 1d1bdcfc84a40d2480296fcdac71dcaf3da537d7 | 2021-06-23T10:37:43.000Z | [
"pytorch",
"jax",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | WikinewsSum | null | WikinewsSum/t5-base-multi-combine-wiki-news | 2 | null | transformers | 23,553 | Entry not found |
WikinewsSum/t5-base-multi-de-wiki-news | ffbf9a2e21f8be7f6c337c2e4ed6159f7169edbc | 2021-06-23T10:39:29.000Z | [
"pytorch",
"jax",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | WikinewsSum | null | WikinewsSum/t5-base-multi-de-wiki-news | 2 | null | transformers | 23,554 | Entry not found |
Wilson2021/mymodel1007 | 3262ef75dcb3f880387764c7eadf6c471097f61d | 2021-11-04T14:46:34.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Wilson2021 | null | Wilson2021/mymodel1007 | 2 | null | transformers | 23,555 | Entry not found |
WoutN2001/james3 | c733b081cbab51415cc309bca47fc617746be97b | 2021-11-09T12:47:58.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | WoutN2001 | null | WoutN2001/james3 | 2 | null | transformers | 23,556 | ---
tags:
- conversational
---
# waaaa |
Wzf/bert_fintuuing | 718a8f7e6101d5c3ccddd7817c3e967e5f717a88 | 2021-07-17T03:41:23.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Wzf | null | Wzf/bert_fintuuing | 2 | null | transformers | 23,557 | Entry not found |
XuguangAi/DialoGPT-small-Harry | 655a1f34590013fd141c5089718636f6a96b09e5 | 2021-12-03T06:18:22.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | XuguangAi | null | XuguangAi/DialoGPT-small-Harry | 2 | null | transformers | 23,558 | ---
tags:
- conversational
---
# Harry |
YYJ/KunquChat | 7c41f818aaeb7ce2396a721de119f6692a79b687 | 2021-12-23T07:21:17.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | YYJ | null | YYJ/KunquChat | 2 | null | transformers | 23,559 | # Classic Kunqu Opera Appreciation: Final Project
## KunquChat
Author: 1900012921 俞跃江 (Yu Yuejiang)
|
Yankee/TEST21 | a0fcd23d88dbc2ffbb56af7f0d04496099510e8f | 2022-01-29T05:13:42.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Yankee | null | Yankee/TEST21 | 2 | null | transformers | 23,560 | Entry not found |
Yanzhu/bertweetfr_ner | 6c91439ac07f7d2fe1cde1b9e228b31958ba796e | 2021-09-29T14:46:25.000Z | [
"pytorch",
"camembert",
"token-classification",
"transformers",
"autotrain_compatible"
] | token-classification | false | Yanzhu | null | Yanzhu/bertweetfr_ner | 2 | null | transformers | 23,561 | French NER model for tweets. Fine-tuned on the CAP2017 dataset.
label_list = ['O',
'B-person',
'I-person',
'B-musicartist',
'I-musicartist',
'B-org',
'I-org',
'B-geoloc',
'I-geoloc',
'B-product',
'I-product',
'B-transportLine',
'I-transportLine',
'B-media',
'I-media',
'B-sportsteam',
'I-sportsteam',
'B-event',
'I-event',
'B-tvshow',
'I-tvshow',
'B-movie',
'I-movie',
'B-facility',
'I-facility',
'B-other',
'I-other'] |
Yoshisaur/kono-chat | 6087bb323d62ff718c01bd481ade8f02cc0604af | 2022-02-08T20:49:24.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Yoshisaur | null | Yoshisaur/kono-chat | 2 | null | transformers | 23,562 | Entry not found |
ZYW/en-de-es-model | 18f2a74f71a3b6cd572d79057f2eb8ee8a8ecfad | 2021-05-29T17:28:09.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers",
"model-index",
"autotrain_compatible"
] | question-answering | false | ZYW | null | ZYW/en-de-es-model | 2 | null | transformers | 23,563 | ---
model-index:
- name: en-de-es-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# en-de-es-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/en-de-model | 80b428e960ec7c174bd077fad1919e7610fc4454 | 2021-05-29T17:52:17.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers",
"model-index",
"autotrain_compatible"
] | question-answering | false | ZYW | null | ZYW/en-de-model | 2 | null | transformers | 23,564 | ---
model-index:
- name: en-de-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# en-de-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/squad-en-de-es-vi-zh-model | c1d23da63a828e4a74dfd8b86f803bb115cc86f7 | 2021-05-29T21:46:39.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers",
"model-index",
"autotrain_compatible"
] | question-answering | false | ZYW | null | ZYW/squad-en-de-es-vi-zh-model | 2 | null | transformers | 23,565 | ---
model-index:
- name: squad-en-de-es-vi-zh-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# squad-en-de-es-vi-zh-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/test-squad-trained | d6428fbfc69f8ab19d9032f2070603dc45859426 | 2021-05-26T02:38:39.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers",
"model-index",
"autotrain_compatible"
] | question-answering | false | ZYW | null | ZYW/test-squad-trained | 2 | null | transformers | 23,566 | ---
model-index:
- name: test-squad-trained
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# test-squad-trained
This model was trained from scratch on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2026
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 0.988 | 1.0 | 5486 | 1.1790 |
| 0.7793 | 2.0 | 10972 | 1.2026 |
| 0.8068 | 3.0 | 16458 | 1.2026 |
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.6.2
- Tokenizers 0.10.3
|
Zane/Ricky3 | eb4d627fc1fcc8acfa771aee802cd441d4567506 | 2021-07-29T14:50:17.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational",
"license:mit"
] | conversational | false | Zane | null | Zane/Ricky3 | 2 | null | transformers | 23,567 | ---
thumbnail: https://huggingface.co/front/thumbnails/dialogpt.png
tags:
- conversational
license: mit
---
# DialoGPT Trained on the Speech of a Game Character
This is an instance of [microsoft/DialoGPT-small](https://huggingface.co/microsoft/DialoGPT-small) trained on a game character, Neku Sakuraba from [The World Ends With You](https://en.wikipedia.org/wiki/The_World_Ends_with_You). The data comes from [a Kaggle game script dataset](https://www.kaggle.com/ruolinzheng/twewy-game-script).
Chat with the model:
```python
from transformers import AutoTokenizer, AutoModelWithLMHead
import torch

tokenizer = AutoTokenizer.from_pretrained("r3dhummingbird/DialoGPT-small-neku")
model = AutoModelWithLMHead.from_pretrained("r3dhummingbird/DialoGPT-small-neku")
# Let's chat for 4 lines
for step in range(4):
    # encode the new user input, add the eos_token and return a tensor in Pytorch
    new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')
    # print(new_user_input_ids)

    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids

    # generate a response while limiting the total chat history to 1000 tokens
    chat_history_ids = model.generate(
        bot_input_ids, max_length=200,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=100,
        top_p=0.7,
        temperature=0.8
    )

    # pretty print last output tokens from bot
    print("NekuBot: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
``` |
Zen1/test1 | 200244e51a723fb98b8199f71cc1ec18cc96bcbd | 2022-01-15T15:06:24.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Zen1 | null | Zen1/test1 | 2 | null | transformers | 23,568 | ---
tags:
- conversational
---
# My Awesome Model
|
ZikXewen/wav2vec2-large-xlsr-53-thai-demo | 097161310c6a35544399c1c2f09d494d31c86add | 2021-07-05T18:21:43.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | ZikXewen | null | ZikXewen/wav2vec2-large-xlsr-53-thai-demo | 2 | null | transformers | 23,569 | Entry not found |
Zixtrauce/BrandonBot | 059e25dc9f7c33af76b4a02d6be0e32d16181012 | 2021-12-31T06:28:20.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Zixtrauce | null | Zixtrauce/BrandonBot | 2 | null | transformers | 23,570 | ---
tags:
- conversational
---
# BrandonBot |
Zixtrauce/BrandonBot2 | b8039371765ab2515a1185bc862d7dc6f34d3e11 | 2022-01-01T22:09:01.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Zixtrauce | null | Zixtrauce/BrandonBot2 | 2 | null | transformers | 23,571 | ---
tags:
- conversational
---
# BrandonBot2 |
a01709042/DialoGPT-medium | d97bb1e9a4a3cf9d1465d7318078dda439da2869 | 2022-01-05T02:52:05.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | a01709042 | null | a01709042/DialoGPT-medium | 2 | null | transformers | 23,572 | ---
tags:
- conversational
---
# DialoGPT model fine-tuned on conservative Muslim Discord messages |
aadelucia/GPT2_medium_narrative_finetuned_large | 30855d6bbacac0f1513f5e420e456f096726e968 | 2021-12-10T17:45:16.000Z | [
"pytorch",
"jax",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | aadelucia | null | aadelucia/GPT2_medium_narrative_finetuned_large | 2 | null | transformers | 23,573 | Please visit the repo for training details. https://github.com/AADeLucia/gpt2-narrative-decoding |
aadelucia/GPT2_small_narrative_finetuned_medium | 85f03d51a37b93df5c12fe1128771809c85305c2 | 2021-12-10T18:48:36.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | aadelucia | null | aadelucia/GPT2_small_narrative_finetuned_medium | 2 | null | transformers | 23,574 | Please visit the repo for training details. https://github.com/AADeLucia/gpt2-narrative-decoding |
abanoub1412/finetuning | e2c860dfd385b1bb48a2e883ff3edad6f1b21bd0 | 2021-07-04T19:43:42.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | abanoub1412 | null | abanoub1412/finetuning | 2 | null | transformers | 23,575 | Entry not found |
abarbosa/c4-aristo-roberta-large | 22022f76f8ea07815f4c308fa80913beccae022c | 2021-06-24T04:21:15.000Z | [
"pytorch",
"roberta",
"multiple-choice",
"transformers",
"model-index"
] | multiple-choice | false | abarbosa | null | abarbosa/c4-aristo-roberta-large | 2 | null | transformers | 23,576 | ---
metrics:
- accuracy
model-index:
- name: c4-aristo-roberta-large
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# c4-aristo-roberta-large
This model was trained from scratch on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0332
- Accuracy: 0.7370
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- gradient_accumulation_steps: 16
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.8204 | 1.0 | 140 | 0.7246 | 0.7171 |
| 0.5512 | 2.0 | 280 | 0.7441 | 0.7312 |
| 0.3437 | 3.0 | 420 | 0.8940 | 0.7363 |
| 0.291 | 4.0 | 560 | 1.0332 | 0.7370 |
### Framework versions
- Transformers 4.6.1
- Pytorch 1.10.0.dev20210620+cu113
- Datasets 1.6.2
- Tokenizers 0.10.2
|
abhi1nandy2/Europarl-roberta-base | e6bcf2a98570febacbd8956cb623435167bc0d80 | 2022-05-23T20:09:39.000Z | [
"pytorch",
"jax",
"roberta",
"fill-mask",
"English",
"dataset:Europarl",
"transformers",
"Europarl",
"autotrain_compatible"
] | fill-mask | false | abhi1nandy2 | null | abhi1nandy2/Europarl-roberta-base | 2 | null | transformers | 23,577 | ---
language:
- English
tags:
- Europarl
- roberta
datasets:
- Europarl
---
Refer to https://aclanthology.org/2021.semeval-1.87/
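As a hedged illustration only (the card itself just points to the paper), the model's `roberta`/`fill-mask` tags suggest it can be exercised with the standard fill-mask pipeline:

```python
from transformers import pipeline

# Assumption: standard fill-mask usage based on the model's tags; not an official example.
unmasker = pipeline("fill-mask", model="abhi1nandy2/Europarl-roberta-base")
for prediction in unmasker("The European Parliament adopted the <mask> on Tuesday."):
    print(prediction["token_str"], prediction["score"])
```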
## Citation
If you use this model in your work, please add the following citation -
```
@inproceedings{nandy-etal-2021-cs60075,
title = "cs60075{\_}team2 at {S}em{E}val-2021 Task 1 : Lexical Complexity Prediction using Transformer-based Language Models pre-trained on various text corpora",
author = "Nandy, Abhilash and
Adak, Sayantan and
Halder, Tanurima and
Pokala, Sai Mahesh",
booktitle = "Proceedings of the 15th International Workshop on Semantic Evaluation (SemEval-2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.semeval-1.87",
doi = "10.18653/v1/2021.semeval-1.87",
pages = "678--682",
abstract = "The main contribution of this paper is to fine-tune transformer-based language models pre-trained on several text corpora, some being general (E.g., Wikipedia, BooksCorpus), some being the corpora from which the CompLex Dataset was extracted, and others being from other specific domains such as Finance, Law, etc. We perform ablation studies on selecting the transformer models and how their individual complexity scores are aggregated to get the resulting complexity scores. Our method achieves a best Pearson Correlation of 0.784 in sub-task 1 (single word) and 0.836 in sub-task 2 (multiple word expressions).",
}
```
|
abjbpi/DS_small | 9e37d83107cb1d05c0810fad11b26d9a77007c5d | 2021-06-04T11:23:14.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | abjbpi | null | abjbpi/DS_small | 2 | null | transformers | 23,578 | ---
tags:
- conversational
---
# Model v2 |
ad6398/gupshup_e2e_pegasus | 582a0841cf4096bec43cb48a6f21d9fef09121eb | 2021-09-07T09:54:59.000Z | [
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | ad6398 | null | ad6398/gupshup_e2e_pegasus | 2 | null | transformers | 23,579 | Entry not found |
adalbertojunior/test-128-uncased-2 | 6684547d98d7ea17c9ca7a01007f6ddb0e22889e | 2021-10-18T02:08:04.000Z | [
"pytorch",
"jax",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | adalbertojunior | null | adalbertojunior/test-128-uncased-2 | 2 | null | transformers | 23,580 | Entry not found |
adalbertojunior/test-128-uncased | 2c6488b317e985de58194d67cc138b245fcaffc4 | 2021-10-05T13:44:37.000Z | [
"pytorch",
"jax",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | adalbertojunior | null | adalbertojunior/test-128-uncased | 2 | null | transformers | 23,581 | Entry not found |
adalbertojunior/test-128 | 591be9335ad87746a0e71424a90ad46af309186b | 2021-10-01T13:59:07.000Z | [
"pytorch",
"jax",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | adalbertojunior | null | adalbertojunior/test-128 | 2 | null | transformers | 23,582 | Entry not found |
adam1224/dummy-model | e9e214ca5b8c0d83f78c0e1b064c5aa7dbcd371d | 2022-02-10T06:34:25.000Z | [
"pytorch",
"camembert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | adam1224 | null | adam1224/dummy-model | 2 | null | transformers | 23,583 | Entry not found |
adamlin/100perc | e7bc24c9018834e66be8a3a5af3b1bfb8a329681 | 2021-06-24T12:00:54.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0"
] | text-generation | false | adamlin | null | adamlin/100perc | 2 | null | transformers | 23,584 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- null
model_index:
- name: 100perc
results:
- task:
name: Causal Language Modeling
type: text-generation
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# 100perc
This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4594
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 100.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| No log | 1.0 | 140 | 1.8292 |
| No log | 2.0 | 280 | 1.7373 |
| No log | 3.0 | 420 | 1.6889 |
| 2.26 | 4.0 | 560 | 1.6515 |
| 2.26 | 5.0 | 700 | 1.6258 |
| 2.26 | 6.0 | 840 | 1.6063 |
| 2.26 | 7.0 | 980 | 1.5873 |
| 1.6847 | 8.0 | 1120 | 1.5749 |
| 1.6847 | 9.0 | 1260 | 1.5634 |
| 1.6847 | 10.0 | 1400 | 1.5513 |
| 1.6073 | 11.0 | 1540 | 1.5421 |
| 1.6073 | 12.0 | 1680 | 1.5352 |
| 1.6073 | 13.0 | 1820 | 1.5270 |
| 1.6073 | 14.0 | 1960 | 1.5203 |
| 1.5545 | 15.0 | 2100 | 1.5142 |
| 1.5545 | 16.0 | 2240 | 1.5089 |
| 1.5545 | 17.0 | 2380 | 1.5048 |
| 1.5156 | 18.0 | 2520 | 1.5009 |
| 1.5156 | 19.0 | 2660 | 1.4970 |
| 1.5156 | 20.0 | 2800 | 1.4935 |
| 1.5156 | 21.0 | 2940 | 1.4897 |
| 1.4835 | 22.0 | 3080 | 1.4865 |
| 1.4835 | 23.0 | 3220 | 1.4851 |
| 1.4835 | 24.0 | 3360 | 1.4820 |
| 1.4565 | 25.0 | 3500 | 1.4787 |
| 1.4565 | 26.0 | 3640 | 1.4774 |
| 1.4565 | 27.0 | 3780 | 1.4749 |
| 1.4565 | 28.0 | 3920 | 1.4748 |
| 1.4326 | 29.0 | 4060 | 1.4728 |
| 1.4326 | 30.0 | 4200 | 1.4692 |
| 1.4326 | 31.0 | 4340 | 1.4692 |
| 1.4326 | 32.0 | 4480 | 1.4668 |
| 1.4126 | 33.0 | 4620 | 1.4664 |
| 1.4126 | 34.0 | 4760 | 1.4659 |
| 1.4126 | 35.0 | 4900 | 1.4643 |
| 1.394 | 36.0 | 5040 | 1.4622 |
| 1.394 | 37.0 | 5180 | 1.4629 |
| 1.394 | 38.0 | 5320 | 1.4610 |
| 1.394 | 39.0 | 5460 | 1.4623 |
| 1.3775 | 40.0 | 5600 | 1.4599 |
| 1.3775 | 41.0 | 5740 | 1.4600 |
| 1.3775 | 42.0 | 5880 | 1.4580 |
| 1.363 | 43.0 | 6020 | 1.4584 |
| 1.363 | 44.0 | 6160 | 1.4577 |
| 1.363 | 45.0 | 6300 | 1.4559 |
| 1.363 | 46.0 | 6440 | 1.4545 |
| 1.3484 | 47.0 | 6580 | 1.4568 |
| 1.3484 | 48.0 | 6720 | 1.4579 |
| 1.3484 | 49.0 | 6860 | 1.4562 |
| 1.3379 | 50.0 | 7000 | 1.4558 |
| 1.3379 | 51.0 | 7140 | 1.4556 |
| 1.3379 | 52.0 | 7280 | 1.4581 |
| 1.3379 | 53.0 | 7420 | 1.4554 |
| 1.3258 | 54.0 | 7560 | 1.4561 |
| 1.3258 | 55.0 | 7700 | 1.4553 |
| 1.3258 | 56.0 | 7840 | 1.4555 |
| 1.3258 | 57.0 | 7980 | 1.4572 |
| 1.3158 | 58.0 | 8120 | 1.4551 |
| 1.3158 | 59.0 | 8260 | 1.4573 |
| 1.3158 | 60.0 | 8400 | 1.4561 |
| 1.3072 | 61.0 | 8540 | 1.4557 |
| 1.3072 | 62.0 | 8680 | 1.4548 |
| 1.3072 | 63.0 | 8820 | 1.4547 |
| 1.3072 | 64.0 | 8960 | 1.4556 |
| 1.2986 | 65.0 | 9100 | 1.4555 |
| 1.2986 | 66.0 | 9240 | 1.4566 |
| 1.2986 | 67.0 | 9380 | 1.4558 |
| 1.2916 | 68.0 | 9520 | 1.4565 |
| 1.2916 | 69.0 | 9660 | 1.4552 |
| 1.2916 | 70.0 | 9800 | 1.4558 |
| 1.2916 | 71.0 | 9940 | 1.4553 |
| 1.2846 | 72.0 | 10080 | 1.4579 |
| 1.2846 | 73.0 | 10220 | 1.4572 |
| 1.2846 | 74.0 | 10360 | 1.4572 |
| 1.2792 | 75.0 | 10500 | 1.4564 |
| 1.2792 | 76.0 | 10640 | 1.4576 |
| 1.2792 | 77.0 | 10780 | 1.4571 |
| 1.2792 | 78.0 | 10920 | 1.4580 |
| 1.2736 | 79.0 | 11060 | 1.4578 |
| 1.2736 | 80.0 | 11200 | 1.4583 |
| 1.2736 | 81.0 | 11340 | 1.4576 |
| 1.2736 | 82.0 | 11480 | 1.4580 |
| 1.2699 | 83.0 | 11620 | 1.4575 |
| 1.2699 | 84.0 | 11760 | 1.4583 |
| 1.2699 | 85.0 | 11900 | 1.4588 |
| 1.2664 | 86.0 | 12040 | 1.4590 |
| 1.2664 | 87.0 | 12180 | 1.4593 |
| 1.2664 | 88.0 | 12320 | 1.4582 |
| 1.2664 | 89.0 | 12460 | 1.4591 |
| 1.2627 | 90.0 | 12600 | 1.4595 |
| 1.2627 | 91.0 | 12740 | 1.4585 |
| 1.2627 | 92.0 | 12880 | 1.4590 |
| 1.2613 | 93.0 | 13020 | 1.4590 |
| 1.2613 | 94.0 | 13160 | 1.4598 |
| 1.2613 | 95.0 | 13300 | 1.4592 |
| 1.2613 | 96.0 | 13440 | 1.4597 |
| 1.2591 | 97.0 | 13580 | 1.4593 |
| 1.2591 | 98.0 | 13720 | 1.4593 |
| 1.2591 | 99.0 | 13860 | 1.4597 |
| 1.258 | 100.0 | 14000 | 1.4594 |
### Framework versions
- Transformers 4.8.0
- Pytorch 1.8.1+cu111
- Datasets 1.8.0
- Tokenizers 0.10.3
|
adamlin/NCBI_BERT_pubmed_mimic_uncased_base_transformers | a4e7e868f7fb071d0bf17fc4f6bf1899cf8f98c5 | 2019-12-25T17:05:13.000Z | [
"pytorch",
"transformers"
] | null | false | adamlin | null | adamlin/NCBI_BERT_pubmed_mimic_uncased_base_transformers | 2 | null | transformers | 23,585 | Entry not found |
adamlin/tmp | c2a97b19fad5180a8f568f2147e15c647c17b78e | 2021-07-07T18:48:00.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"zh_CN",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible"
] | text2text-generation | false | adamlin | null | adamlin/tmp | 2 | null | transformers | 23,586 | ---
language:
- zh_CN
- zh_CN
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- bleu
model_index:
- name: tmp
results:
- task:
name: Translation
type: translation
metric:
name: Bleu
type: bleu
value: 0.0099
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# tmp
This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: nan
- Bleu: 0.0099
- Gen Len: 3.3917
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1024
- eval_batch_size: 1024
- seed: 13
- gradient_accumulation_steps: 2
- total_train_batch_size: 2048
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 20.0
- mixed_precision_training: Native AMP
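For orientation only, here is a hedged sketch of how the hyperparameters above could be expressed as `Seq2SeqTrainingArguments`; the `output_dir` and any option not in the list are assumptions, not the settings of the original run.
```python
from transformers import Seq2SeqTrainingArguments

# Illustrative reconstruction of the listed configuration (not the original script).
training_args = Seq2SeqTrainingArguments(
    output_dir="tmp",                    # assumed
    learning_rate=5e-05,
    per_device_train_batch_size=1024,
    per_device_eval_batch_size=1024,
    seed=13,
    gradient_accumulation_steps=2,       # gives the total train batch size of 2048
    lr_scheduler_type="linear",
    num_train_epochs=20.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    fp16=True,                           # "Native AMP" mixed precision
)
```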
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| No log | 1.0 | 1 | nan | 0.0114 | 3.3338 |
| No log | 2.0 | 2 | nan | 0.0114 | 3.3338 |
| No log | 3.0 | 3 | nan | 0.0114 | 3.3338 |
| No log | 4.0 | 4 | nan | 0.0114 | 3.3338 |
| No log | 5.0 | 5 | nan | 0.0114 | 3.3338 |
| No log | 6.0 | 6 | nan | 0.0114 | 3.3338 |
| No log | 7.0 | 7 | nan | 0.0114 | 3.3338 |
| No log | 8.0 | 8 | nan | 0.0114 | 3.3338 |
| No log | 9.0 | 9 | nan | 0.0114 | 3.3338 |
| No log | 10.0 | 10 | nan | 0.0114 | 3.3338 |
| No log | 11.0 | 11 | nan | 0.0114 | 3.3338 |
| No log | 12.0 | 12 | nan | 0.0114 | 3.3338 |
| No log | 13.0 | 13 | nan | 0.0114 | 3.3338 |
| No log | 14.0 | 14 | nan | 0.0114 | 3.3338 |
| No log | 15.0 | 15 | nan | 0.0114 | 3.3338 |
| No log | 16.0 | 16 | nan | 0.0114 | 3.3338 |
| No log | 17.0 | 17 | nan | 0.0114 | 3.3338 |
| No log | 18.0 | 18 | nan | 0.0114 | 3.3338 |
| No log | 19.0 | 19 | nan | 0.0114 | 3.3338 |
| No log | 20.0 | 20 | nan | 0.0114 | 3.3338 |
### Framework versions
- Transformers 4.8.2
- Pytorch 1.8.1+cu111
- Datasets 1.9.0
- Tokenizers 0.10.3
|
addy88/wav2vec-odia-stt | 78b13b2777c5f33bd5aeb29ac50fbbe15e6bf36b | 2021-12-19T15:56:01.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | addy88 | null | addy88/wav2vec-odia-stt | 2 | null | transformers | 23,587 | ## Usage
The model can be used directly (without a language model) as follows:
```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import argparse
def parse_transcription(wav_file):
    # load pretrained model
    processor = Wav2Vec2Processor.from_pretrained("addy88/wav2vec2-odia-stt")
    model = Wav2Vec2ForCTC.from_pretrained("addy88/wav2vec2-odia-stt")

    # load audio
    audio_input, sample_rate = sf.read(wav_file)

    # pad input values and return pt tensor
    input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values

    # INFERENCE
    # retrieve logits & take argmax
    logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)

    # transcribe
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    print(transcription)

if __name__ == "__main__":
    # Minimal CLI entry point so the argparse import is used; the argument name is illustrative.
    parser = argparse.ArgumentParser(description="Transcribe a 16 kHz WAV file")
    parser.add_argument("wav_file", help="path to the WAV file")
    args = parser.parse_args()
    parse_transcription(args.wav_file)
``` |
addy88/wav2vec2-assamese-stt | 85ddc928e281ab1e779dec01949e055c008c8c7f | 2021-12-19T16:55:56.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | addy88 | null | addy88/wav2vec2-assamese-stt | 2 | null | transformers | 23,588 | ## Usage
The model can be used directly (without a language model) as follows:
```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import argparse
def parse_transcription(wav_file):
    # load pretrained model
    processor = Wav2Vec2Processor.from_pretrained("addy88/wav2vec2-assamese-stt")
    model = Wav2Vec2ForCTC.from_pretrained("addy88/wav2vec2-assamese-stt")

    # load audio
    audio_input, sample_rate = sf.read(wav_file)

    # pad input values and return pt tensor
    input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values

    # INFERENCE
    # retrieve logits & take argmax
    logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)

    # transcribe
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    print(transcription)

if __name__ == "__main__":
    # Minimal CLI entry point so the argparse import is used; the argument name is illustrative.
    parser = argparse.ArgumentParser(description="Transcribe a 16 kHz WAV file")
    parser.add_argument("wav_file", help="path to the WAV file")
    args = parser.parse_args()
    parse_transcription(args.wav_file)
``` |
addy88/wav2vec2-dogri-stt | d199c9e21ef97ad3afa5f9391698efb286b2ab8b | 2021-12-19T16:43:44.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | addy88 | null | addy88/wav2vec2-dogri-stt | 2 | null | transformers | 23,589 | ## Usage
The model can be used directly (without a language model) as follows:
```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import argparse
def parse_transcription(wav_file):
    # load pretrained model
    processor = Wav2Vec2Processor.from_pretrained("addy88/wav2vec2-dogri-stt")
    model = Wav2Vec2ForCTC.from_pretrained("addy88/wav2vec2-dogri-stt")

    # load audio
    audio_input, sample_rate = sf.read(wav_file)

    # pad input values and return pt tensor
    input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values

    # INFERENCE
    # retrieve logits & take argmax
    logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)

    # transcribe
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    print(transcription)

if __name__ == "__main__":
    # Minimal CLI entry point so the argparse import is used; the argument name is illustrative.
    parser = argparse.ArgumentParser(description="Transcribe a 16 kHz WAV file")
    parser.add_argument("wav_file", help="path to the WAV file")
    args = parser.parse_args()
    parse_transcription(args.wav_file)
``` |
addy88/wav2vec2-large-xls-r-300m-hindi-colab | 24704948cfdaea65b5e3f9c742b3450cb965401a | 2021-12-09T13:07:18.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | addy88 | null | addy88/wav2vec2-large-xls-r-300m-hindi-colab | 2 | null | transformers | 23,590 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xls-r-300m-hindi-colab
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-hindi-colab
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP
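For orientation only, a minimal sketch of the configuration above as `TrainingArguments`; the `output_dir` and anything not in the list are assumptions rather than the original notebook's settings.
```python
from transformers import TrainingArguments

# Illustrative reconstruction of the listed configuration (not the original notebook).
training_args = TrainingArguments(
    output_dir="wav2vec2-large-xls-r-300m-hindi-colab",  # assumed
    learning_rate=3e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    gradient_accumulation_steps=2,   # gives the total train batch size of 32
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=30,
    fp16=True,                       # "Native AMP" mixed precision
)
```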
### Training results
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
|
adhisetiawan/test-vit | a2d0af0108bcd555a35fd31851d648eb20c67dca | 2021-11-25T16:44:43.000Z | [
"pytorch",
"vit",
"feature-extraction",
"transformers"
] | feature-extraction | false | adhisetiawan | null | adhisetiawan/test-vit | 2 | null | transformers | 23,591 | Entry not found |
adhisetiawan/vit-resisc45 | 27a8086680cd3fa4b3d209a345e229be1fed86c5 | 2022-01-16T02:52:55.000Z | [
"pytorch",
"vit",
"feature-extraction",
"transformers"
] | feature-extraction | false | adhisetiawan | null | adhisetiawan/vit-resisc45 | 2 | null | transformers | 23,592 | Entry not found |
adilism/wav2vec2-large-xlsr-kyrgyz | 1569d7aa1aa4606ea36940d39360f7df61592b76 | 2021-07-05T18:50:45.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"ky",
"dataset:common_voice",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | adilism | null | adilism/wav2vec2-large-xlsr-kyrgyz | 2 | null | transformers | 23,593 | ---
language: ky
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: Wav2Vec2-XLSR-53 Kyrgyz by adilism
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice ky
type: common_voice
args: ky
metrics:
- name: Test WER
type: wer
value: 34.08
---
# Wav2Vec2-Large-XLSR-53-Kyrgyz
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Kyrgyz using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "ky", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("adilism/wav2vec2-large-xlsr-kyrgyz")
model = Wav2Vec2ForCTC.from_pretrained("adilism/wav2vec2-large-xlsr-kyrgyz")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Kyrgyz test data of Common Voice:
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "ky", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("adilism/wav2vec2-large-xlsr-kyrgyz")
model = Wav2Vec2ForCTC.from_pretrained("adilism/wav2vec2-large-xlsr-kyrgyz")
model.to("cuda")
chars_to_ignore = [",", "?", ".", "!", "-", ";", ":", "—", "–", "”"]
chars_to_ignore_regex = f'[{"".join(chars_to_ignore)}]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run the model over the test set and collect predictions
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 34.08 %
## Training
The Common Voice `train` and `validation` datasets were used for training.
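As a hint for reproduction, a minimal sketch of loading those splits with 🤗 Datasets follows; this is an assumption about the data loading, not the author's training script.
```python
from datasets import load_dataset

# Concatenate the Kyrgyz train and validation splits, as described above.
train_dataset = load_dataset("common_voice", "ky", split="train+validation")
```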
|
aditeyabaral/additionalpretrained-bert-base-cased | 4cf545788e4f50a82e495641d0bf58ed2a83d120 | 2021-10-21T09:49:57.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | aditeyabaral | null | aditeyabaral/additionalpretrained-bert-base-cased | 2 | null | transformers | 23,594 | Entry not found |
aditeyabaral/additionalpretrained-bert-hinglish-big | ecc086a5872ad6789675aba8a70338083b702ee2 | 2021-10-20T18:23:17.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | aditeyabaral | null | aditeyabaral/additionalpretrained-bert-hinglish-big | 2 | null | transformers | 23,595 | Entry not found |
aditeyabaral/additionalpretrained-contrastive-roberta-base | 5551ead972a0164390463ca2ef1c6ae9d09e8e78 | 2021-11-13T13:28:39.000Z | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | aditeyabaral | null | aditeyabaral/additionalpretrained-contrastive-roberta-base | 2 | null | transformers | 23,596 | Entry not found |
aditeyabaral/additionalpretrained-distilbert-hinglish-big | d8bfa36593580a9a8b658c18ae1e34ce844817b9 | 2021-10-20T18:31:29.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | aditeyabaral | null | aditeyabaral/additionalpretrained-distilbert-hinglish-big | 2 | null | transformers | 23,597 | Entry not found |
aditeyabaral/additionalpretrained-distilbert-hinglish-small | 3b24b84599512d428c23e36a846d26e5532ff9ce | 2021-10-20T18:33:10.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | aditeyabaral | null | aditeyabaral/additionalpretrained-distilbert-hinglish-small | 2 | null | transformers | 23,598 | Entry not found |
aditeyabaral/bert-hinglish-small | 7561ccbab9be1f1fbd98e303b3e0b99835074def | 2021-09-25T23:45:37.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | aditeyabaral | null | aditeyabaral/bert-hinglish-small | 2 | null | transformers | 23,599 | Entry not found |