modelId (string) | sha (string) | lastModified (string) | tags (sequence) | pipeline_tag (string) | private (bool) | author (string) | config (null) | id (string) | downloads (float64) | likes (float64) | library_name (string) | __index_level_0__ (int64) | readme (string) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Jeevesh8/feather_berts_50 | 095bc2f26fa4238538ca50ca027c2344eba93a4a | 2022-04-20T13:34:25.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_50 | 4 | null | transformers | 19,400 | Entry not found |
Jeevesh8/feather_berts_51 | eedec6b71eb7c4918912442b3912ae53d3af288f | 2022-04-20T13:34:55.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_51 | 4 | null | transformers | 19,401 | Entry not found |
Jeevesh8/feather_berts_52 | edbac651624547affc6d67004f2cfd1a86c19d29 | 2022-04-20T13:35:20.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_52 | 4 | null | transformers | 19,402 | Entry not found |
Jeevesh8/feather_berts_53 | da2e51734d1ca5b4407e0e0d00adce2cac729b1f | 2022-04-20T13:35:46.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_53 | 4 | null | transformers | 19,403 | Entry not found |
Jeevesh8/feather_berts_54 | 8ad3c3304aa812c2a316ca7541c75846b4d587de | 2022-04-20T13:36:12.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_54 | 4 | null | transformers | 19,404 | Entry not found |
Jeevesh8/feather_berts_55 | 737a7022ef074e846a08089c037a4e1f60dbcc23 | 2022-04-20T13:36:36.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_55 | 4 | null | transformers | 19,405 | Entry not found |
Jeevesh8/feather_berts_56 | fe03d1e8e4d89aadeeae81009b589a439f9b9e1a | 2022-04-20T13:37:18.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_56 | 4 | null | transformers | 19,406 | Entry not found |
Jeevesh8/feather_berts_57 | 231fa0dbebf7c23c9956367270076f5129574ae2 | 2022-04-20T13:37:43.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_57 | 4 | null | transformers | 19,407 | Entry not found |
Jeevesh8/feather_berts_58 | 1f10ce43c242dad74d1aa3659bfc7e23e2fbade9 | 2022-04-20T13:38:09.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_58 | 4 | null | transformers | 19,408 | Entry not found |
Jeevesh8/feather_berts_59 | b606443767d18233ec4a875a9ea16dce1cf1a0e6 | 2022-04-20T13:38:36.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_59 | 4 | null | transformers | 19,409 | Entry not found |
Jeevesh8/feather_berts_60 | c880e1a797d05529ebdbe7740b86346ca117977b | 2022-04-20T13:39:03.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_60 | 4 | null | transformers | 19,410 | Entry not found |
Jeevesh8/feather_berts_61 | e7016f5446550fa2897f6d7186b10065f550b2a6 | 2022-04-20T13:39:30.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_61 | 4 | null | transformers | 19,411 | Entry not found |
Jeevesh8/feather_berts_62 | aae603a34ce39c4ce47e28aaecdccb8b4a54ef51 | 2022-04-20T13:39:57.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_62 | 4 | null | transformers | 19,412 | Entry not found |
Jeevesh8/feather_berts_63 | d44845bb56d80dbc3ab2d4e44897004b8d93c861 | 2022-04-20T13:40:23.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_63 | 4 | null | transformers | 19,413 | Entry not found |
Jeevesh8/feather_berts_64 | 4328531517c2feadb0bf0905de40ecc1d30ed9cb | 2022-04-20T13:40:51.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_64 | 4 | null | transformers | 19,414 | Entry not found |
Jeevesh8/feather_berts_65 | 0d6692655fe3e4c8ac532add5da0f694c07e0a54 | 2022-04-20T13:41:22.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_65 | 4 | null | transformers | 19,415 | Entry not found |
Jeevesh8/feather_berts_66 | 8eea60349e5ef8baab0b3cafd77f436264ac93e4 | 2022-04-20T13:41:50.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_66 | 4 | null | transformers | 19,416 | Entry not found |
Jeevesh8/feather_berts_68 | 283f8c5dc4b184c0c901ee6232b3c3747a5bae99 | 2022-04-20T13:42:45.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_68 | 4 | null | transformers | 19,417 | Entry not found |
Jeevesh8/feather_berts_69 | 34c8c96af6651b9a11bdab96d09b50f0abad2b0f | 2022-04-20T13:43:12.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_69 | 4 | null | transformers | 19,418 | Entry not found |
Jeevesh8/feather_berts_70 | 8d55d0d9d8c7870449524c983e148781ea1d9457 | 2022-04-20T13:43:39.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_70 | 4 | null | transformers | 19,419 | Entry not found |
Jeevesh8/feather_berts_71 | 21b37021fbf4ee1f3c2b762c26986cd1e308ebd8 | 2022-04-20T13:44:05.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_71 | 4 | null | transformers | 19,420 | Entry not found |
Jeevesh8/feather_berts_72 | 8f8b44d85ff81d1ac1804d380381741445a82d96 | 2022-04-20T13:44:32.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_72 | 4 | null | transformers | 19,421 | Entry not found |
Jeevesh8/feather_berts_74 | d89a9332d58f449b683bba5b352043f930e26b62 | 2022-04-20T13:45:30.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_74 | 4 | null | transformers | 19,422 | Entry not found |
Jeevesh8/feather_berts_75 | 67f67854fc22cec797a6622d45edd3393823da87 | 2022-04-20T13:45:57.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_75 | 4 | null | transformers | 19,423 | Entry not found |
Jeevesh8/feather_berts_76 | 9cc114a4e73645943f86cc00e198ac14255779e3 | 2022-04-20T13:46:25.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_76 | 4 | null | transformers | 19,424 | Entry not found |
Jeevesh8/feather_berts_78 | c5bcf645834f97f3d2d8eca638f39ba95d10833b | 2022-04-20T13:47:19.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_78 | 4 | null | transformers | 19,425 | Entry not found |
Jeevesh8/feather_berts_79 | cc503f5de7dcdf4f6391c8ceef4ca2e59764b967 | 2022-04-20T13:47:47.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_79 | 4 | null | transformers | 19,426 | Entry not found |
Jeevesh8/feather_berts_80 | a4fd7f2dd20e2679266f713b39d7021c4c29923c | 2022-04-20T13:48:13.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_80 | 4 | null | transformers | 19,427 | Entry not found |
Jeevesh8/feather_berts_81 | c7d2d772610368beeca899ba2db6d5515e4711eb | 2022-04-20T13:48:41.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_81 | 4 | null | transformers | 19,428 | Entry not found |
Jeevesh8/feather_berts_82 | 20d27fd09511d4a9b1df64ff208a2bf1a35fc8a6 | 2022-04-20T13:49:09.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_82 | 4 | null | transformers | 19,429 | Entry not found |
Jeevesh8/feather_berts_83 | 301d7b3e1a2fb167324e04aa9af8f98ab6a4d6f7 | 2022-04-20T13:49:36.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_83 | 4 | null | transformers | 19,430 | Entry not found |
Jeevesh8/feather_berts_84 | fcc362c19e968a18b7d737195edea0f04bb10645 | 2022-04-20T13:50:04.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_84 | 4 | null | transformers | 19,431 | Entry not found |
Jeevesh8/feather_berts_86 | e2967778d8b19305d43b4e0544b72c6a3d32fad0 | 2022-04-20T13:50:55.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_86 | 4 | null | transformers | 19,432 | Entry not found |
Jeevesh8/feather_berts_87 | f16b675de8edddf27e85d3e0d5230ddd28eb645b | 2022-04-20T13:51:20.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_87 | 4 | null | transformers | 19,433 | Entry not found |
Jeevesh8/feather_berts_88 | cbeeff0c68905285bdb692d7bfb1934118e37031 | 2022-04-20T13:52:04.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_88 | 4 | null | transformers | 19,434 | Entry not found |
Jeevesh8/feather_berts_89 | a8c620920c1385c5a9a78582ddcf6b507079fe03 | 2022-04-20T13:52:41.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_89 | 4 | null | transformers | 19,435 | Entry not found |
Jeevesh8/feather_berts_90 | 239910589dacb9c66ee4fc0ca86490d0008d8cc3 | 2022-04-20T13:53:06.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_90 | 4 | null | transformers | 19,436 | Entry not found |
Jeevesh8/feather_berts_91 | d54d3b3ef115d86cf6a2f295d5865a4387f7b20d | 2022-04-20T13:53:32.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_91 | 4 | null | transformers | 19,437 | Entry not found |
Jeevesh8/feather_berts_92 | 1995862bd96739c086e6798b270ae50f947b9aa5 | 2022-04-20T13:53:58.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_92 | 4 | null | transformers | 19,438 | Entry not found |
Jeevesh8/feather_berts_94 | 7dc4ac6ae5c57eee73bb5932616aa095304e0300 | 2022-04-20T13:54:56.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_94 | 4 | null | transformers | 19,439 | Entry not found |
Jeevesh8/feather_berts_96 | 70dde6cf5c9a7c35617b0f28abe59b1418b2fa9e | 2022-04-20T13:55:49.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_96 | 4 | null | transformers | 19,440 | Entry not found |
Jeevesh8/feather_berts_97 | 546550eeeabe728e784bc45ebd8f5bb5323ff467 | 2022-04-20T13:56:14.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_97 | 4 | null | transformers | 19,441 | Entry not found |
Jeevesh8/feather_berts_98 | b114ee687805cad88a76fc1dc0f2044df5bc0623 | 2022-04-20T13:56:57.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_98 | 4 | null | transformers | 19,442 | Entry not found |
Jeevesh8/feather_berts_99 | c21f847b65e5c9a1d4495fdf755236bd4287950f | 2022-04-20T13:57:23.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_99 | 4 | null | transformers | 19,443 | Entry not found |
Finnish-NLP/t5-tiny-nl6-finnish | 8fc6c94aa5fc8bb72d31a1ff150426f940981ac7 | 2022-07-12T13:05:38.000Z | [
"pytorch",
"jax",
"tensorboard",
"t5",
"text2text-generation",
"fi",
"dataset:Finnish-NLP/mc4_fi_cleaned",
"dataset:wikipedia",
"arxiv:1910.10683",
"arxiv:2002.05202",
"arxiv:2109.10686",
"transformers",
"finnish",
"t5x",
"seq2seq",
"license:apache-2.0",
"autotrain_compatible"
] | text2text-generation | false | Finnish-NLP | null | Finnish-NLP/t5-tiny-nl6-finnish | 4 | null | transformers | 19,444 | ---
language:
- fi
license: apache-2.0
tags:
- finnish
- t5
- t5x
- seq2seq
datasets:
- Finnish-NLP/mc4_fi_cleaned
- wikipedia
inference: false
---
# T5-tiny-nl6 for Finnish
Pretrained T5 model on Finnish language using a span-based masked language modeling (MLM) objective. T5 was introduced in
[this paper](https://arxiv.org/abs/1910.10683)
and first released at [this page](https://github.com/google-research/text-to-text-transfer-transformer).
**Note:** The Hugging Face inference widget is deactivated because this model needs text-to-text fine-tuning on a specific downstream task to be useful in practice. As an example of a fine-tuned Finnish T5 model, you can check [Finnish-NLP/t5-small-nl24-casing-punctuation-correction](https://huggingface.co/Finnish-NLP/t5-small-nl24-casing-punctuation-correction), which has been fine-tuned to correct missing casing and punctuation in Finnish text.
## Model description
T5 is an encoder-decoder model and treats all NLP problems in a text-to-text format.
Finnish T5 is a transformers model pretrained on a very large corpus of Finnish data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data), with an automatic process to generate inputs and outputs from those texts.
More precisely, it was pretrained with the span-based masked language modeling (MLM) objective. Spans of the input sequence are masked by so-called sentinel tokens (a.k.a. unique mask tokens), and the output sequence is formed as a concatenation of the same sentinel tokens and the real masked tokens. This way, the model learns an inner representation of the Finnish language.
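As a rough illustration of this objective, span corruption turns a single sentence into an (input, target) pair. The sketch below uses a made-up English example rather than actual training data:

```python
# Illustrative sketch of T5-style span corruption (not the actual preprocessing code).
# <extra_id_0>, <extra_id_1>, ... are the sentinel (unique mask) tokens mentioned above.
original = "Thank you for inviting me to your party last week"
corrupted_input = "Thank you <extra_id_0> me to your party <extra_id_1> week"
target = "<extra_id_0> for inviting <extra_id_1> last <extra_id_2>"
```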
This model used the [T5 v1.1](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) improvements compared to the original T5 model during the pretraining:
- GEGLU activation in feed-forward hidden layer, rather than ReLU - see [here](https://arxiv.org/abs/2002.05202)
- Dropout was turned off in pretraining (quality win). Dropout should be re-enabled during fine-tuning
- Pretrained on span-based masked language modeling (MLM) objective only without mixing in the downstream tasks
- No parameter sharing between embedding and classifier layer
This model also used the "efficient" T5 architecture findings presented in [this paper](https://arxiv.org/abs/2109.10686). In a nutshell, the paper indicates that a Deep-Narrow model architecture is favorable for downstream performance compared to other model architectures of similar parameter count. To be more precise, model depth is defined as the number of transformer blocks that are stacked sequentially.
This model uses the [t5-efficient-tiny-nl6](https://huggingface.co/google/t5-efficient-tiny-nl6) architecture's layer depth which means both the encoder and the decoder have 6 transformer layers compared to the original T5 "tiny" model's architecture of 4 transformer layers.
In total, this model has 31 million parameters.
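If you want to verify the layer depth and parameter count stated above, a quick sketch using the standard Transformers T5 config attributes is:

```python
from transformers import T5Config, T5ForConditionalGeneration

config = T5Config.from_pretrained("Finnish-NLP/t5-tiny-nl6-finnish")
print(config.num_layers, config.num_decoder_layers)  # expected: 6 encoder and 6 decoder blocks

model = T5ForConditionalGeneration.from_pretrained("Finnish-NLP/t5-tiny-nl6-finnish")
print(sum(p.numel() for p in model.parameters()))  # roughly 31 million parameters
```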
## Intended uses & limitations
This model was only pretrained in a self-supervised way, excluding any supervised training. Therefore, unlike Google's original T5 model, this model has to be fine-tuned before it is usable on a downstream task such as text classification. **Note:** You most likely need to fine-tune these T5 models without mixed precision, so fine-tune them in full fp32 precision. You can also find more fine-tuning tips [here](https://discuss.huggingface.co/t/t5-finetuning-tips), for example.
### How to use
Here is how to use this model in PyTorch:
```python
from transformers import T5Tokenizer, T5ForConditionalGeneration
tokenizer = T5Tokenizer.from_pretrained("Finnish-NLP/t5-tiny-nl6-finnish")
model = T5ForConditionalGeneration.from_pretrained("Finnish-NLP/t5-tiny-nl6-finnish")
```
and in TensorFlow:
```python
from transformers import T5Tokenizer, TFT5ForConditionalGeneration
tokenizer = T5Tokenizer.from_pretrained("Finnish-NLP/t5-tiny-nl6-finnish")
model = TFT5ForConditionalGeneration.from_pretrained("Finnish-NLP/t5-tiny-nl6-finnish", from_pt=True)
```
### Limitations and bias
The training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral. Therefore, the model can have biased predictions. This bias will also affect all fine-tuned versions of this model.
## Training data
This Finnish T5 model was pretrained on the combination of six datasets:
- [mc4_fi_cleaned](https://huggingface.co/datasets/Finnish-NLP/mc4_fi_cleaned), the dataset mC4 is a multilingual colossal, cleaned version of Common Crawl's web crawl corpus. We used the Finnish subset of the mC4 dataset and further cleaned it with our own text data cleaning codes (check the dataset repo).
- [wikipedia](https://huggingface.co/datasets/wikipedia) We used the Finnish subset of the wikipedia (August 2021) dataset
- [Yle Finnish News Archive 2011-2018](http://urn.fi/urn:nbn:fi:lb-2017070501)
- [Yle Finnish News Archive 2019-2020](http://urn.fi/urn:nbn:fi:lb-2021050401)
- [Finnish News Agency Archive (STT)](http://urn.fi/urn:nbn:fi:lb-2018121001)
- [The Suomi24 Sentences Corpus](http://urn.fi/urn:nbn:fi:lb-2020021803)
Raw datasets were automatically cleaned to filter out bad quality and non-Finnish examples. Also, a [perplexity](https://huggingface.co/course/chapter7/3#perplexity-for-language-models) score was calculated for all texts with a KenLM model which was trained with very clean Finnish texts only. This perplexity score can then be used to determine how "clean" Finnish language the text contains. Lastly, all datasets were concatenated and the top 90% perplexity score was used as a filtering threshold to filter out the worst quality 10% of texts. Together these cleaned datasets were around 76GB of text.
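The exact cleaning scripts are not reproduced here, but the perplexity-filtering step can be sketched roughly as follows (assuming the `kenlm` Python bindings, a KenLM model file trained on clean Finnish text, and a list `texts` of documents; all names are illustrative placeholders):

```python
import kenlm
import numpy as np

lm = kenlm.Model("clean_finnish.arpa")  # placeholder path to a KenLM model trained on clean Finnish text
texts = ["..."]                         # placeholder: the concatenated corpus documents
perplexities = np.array([lm.perplexity(text) for text in texts])

# Keep the 90% of texts with the lowest perplexity, dropping the worst-quality 10%.
threshold = np.percentile(perplexities, 90)
kept = [text for text, ppl in zip(texts, perplexities) if ppl <= threshold]
```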
## Training procedure
### Preprocessing
The texts are tokenized using WordPiece and a vocabulary size of 32000. The inputs and the outputs are sequences of 512 consecutive tokens. Texts are not lowercased, so this model is case-sensitive: it makes a difference between finnish and Finnish.
### Pretraining
The model was trained on a TPUv3-8 VM, sponsored by the [Google TPU Research Cloud](https://sites.research.google/trc/about/), for 500K steps with a batch size of 512 (131B tokens in total). The optimizer used was AdaFactor with a learning rate warmup for 10K steps at a constant learning rate of 1e-2, followed by an inverse square root decay of the learning rate.
The training code came from Google's Jax/Flax-based [t5x framework](https://github.com/google-research/t5x), and some t5x task definitions were adapted from [Per's t5x work](https://huggingface.co/pere).
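In PyTorch terms, the optimizer setup described above could be sketched roughly like this (an approximate equivalent for illustration only, not the original t5x training code; `model` is assumed to be a loaded T5 model):

```python
import torch
from transformers.optimization import Adafactor

optimizer = Adafactor(model.parameters(), lr=1e-2, relative_step=False, scale_parameter=False)

warmup_steps = 10_000
def inverse_sqrt(step: int) -> float:
    # Constant learning rate during warmup, then decay proportional to 1/sqrt(step).
    return 1.0 if step < warmup_steps else (warmup_steps / step) ** 0.5

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=inverse_sqrt)
```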
## Evaluation results
Evaluation was done by fine-tuning the model on a downstream text classification task with two different labeled Finnish datasets: [Yle News](https://github.com/spyysalo/yle-corpus) and [Eduskunta](https://github.com/aajanki/eduskunta-vkk). Classification fine-tuning was done with a sequence length of 128 tokens.
When fine-tuned on those datasets, this model (the first row of the table) achieves the following accuracy results compared to our other T5 models and their parameter counts:
| | Model parameters | Yle News accuracy | Eduskunta accuracy |
|-------------------------------------------------------|------------------|---------------------|----------------------|
|Finnish-NLP/t5-tiny-nl6-finnish | 31 million |92.80 |69.07 |
|Finnish-NLP/t5-mini-nl8-finnish | 72 million |93.89 |71.43 |
|Finnish-NLP/t5-small-nl24-finnish | 260 million |**94.68** |74.90 |
|Finnish-NLP/byt5-base-finnish | 582 million |92.33 |73.13 |
|Finnish-NLP/t5-base-nl36-finnish | 814 million |94.40 |**75.97** |
|Finnish-NLP/t5-large-nl36-finnish | 1425 million |TBA |TBA |
When fine-tuning Google's multilingual mT5 models on the same datasets, we can clearly see that our monolingual Finnish T5 models achieve much better results on Finnish text classification:
| | Model parameters | Yle News accuracy | Eduskunta accuracy |
|-------------------------------------------------------|------------------|---------------------|----------------------|
|google/mt5-small | 301 million |91.51 |64.10 |
|google/mt5-base | 583 million |92.71 |68.40 |
## Acknowledgements
This project would not have been possible without compute generously provided by Google through the
[TPU Research Cloud](https://sites.research.google/trc/).
## Team Members
- Aapo Tanskanen, [Hugging Face profile](https://huggingface.co/aapot), [LinkedIn profile](https://www.linkedin.com/in/aapotanskanen/)
- Rasmus Toivanen, [Hugging Face profile](https://huggingface.co/RASMUS), [LinkedIn profile](https://www.linkedin.com/in/rasmustoivanen/)
Feel free to contact us for more details 🤗 |
frozenwalker/SciFive_pubmedqa_question_generation_using_numerical_prompt_entity | a856513f2a239220423b297f0f73f24429904e04 | 2022-04-20T19:59:38.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | frozenwalker | null | frozenwalker/SciFive_pubmedqa_question_generation_using_numerical_prompt_entity | 4 | null | transformers | 19,445 | Entry not found |
birgermoell/common-voice-lithuanian-fairseq | 7375a59151a3573810ebade422d0595c2de4d95f | 2022-04-21T13:19:30.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"lt",
"dataset:common_voice",
"transformers",
"common_voice",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | birgermoell | null | birgermoell/common-voice-lithuanian-fairseq | 4 | null | transformers | 19,446 | ---
language:
- lt
license: apache-2.0
tags:
- automatic-speech-recognition
- common_voice
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-common_voice-lithuanian-fairseq
results: []
---
# wav2vec2-common_voice-lithuanian-fairseq
|
satish860/sms_spam_detection-manning | 72533bd227203b2604899e1c7bb41196645b3498 | 2022-04-22T02:22:56.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | satish860 | null | satish860/sms_spam_detection-manning | 4 | null | transformers | 19,447 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: sms_spam_detection-manning
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# sms_spam_detection-manning
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0512
- Accuracy: 0.9886
- F1: 0.9573
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
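For reference, a sketch of `TrainingArguments` mirroring the hyperparameters listed above might look like this (the output directory is a placeholder; the exact Trainer setup used for this run is not included in the card):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="sms_spam_detection-manning",  # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=2,
)
```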
### Training results
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0a0+17540c5
- Datasets 2.1.0
- Tokenizers 0.12.1
|
Parsa/Buchwald-Hartwig-Yield-prediction | 89b4205edffca3bd9c4c9d238878fb18750c95e8 | 2022-05-04T06:51:13.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | false | Parsa | null | Parsa/Buchwald-Hartwig-Yield-prediction | 4 | null | transformers | 19,448 | Buchwald-Hartwig-Yield-prediction is a finetuned model based on 'DeepChem/ChemBERTa-77M-MLM' for yield prediction.
For training and testing the model, 'https://tdcommons.ai/single_pred_tasks/yields' data was used with 70/30 random splitting for the train and test dataset.
the R2 score is equal to 97.2879% and val_loss is equal to 0.0020.
for using it, your input should look like the following: 'reactant smiles''>>''product' with no spaces. For using it, do not use the Hosted inference API. instead, download it yourself or use the colab link below.
[](https://colab.research.google.com/drive/1UyQwPaHmH5BiEa0yZyuZPmMsVi-hIms0#scrollTo=DKy4QptyYTqz)
Github repo: https://github.com/mephisto121/Buchwald-Hartwig-Yield-prediction
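For local use, a rough sketch could look like the following (the reaction string is a placeholder, and interpreting the raw output as a yield value is an assumption based on the regression metrics above; see the Colab notebook for the exact post-processing):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("Parsa/Buchwald-Hartwig-Yield-prediction")
model = AutoModelForSequenceClassification.from_pretrained("Parsa/Buchwald-Hartwig-Yield-prediction")

reaction = "reactant_smiles>>product_smiles"  # placeholder: real reactant and product SMILES, no spaces
inputs = tokenizer(reaction, return_tensors="pt")
with torch.no_grad():
    output = model(**inputs).logits
print(output)
```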
|
junnyu/chinese_GAU-alpha-char_L-24_H-768 | 119c0c1d7fbe8ec043d1f9bc56ce3e81a3b89e2f | 2022-05-11T03:29:46.000Z | [
"pytorch",
"gau_alpha",
"fill-mask",
"zh",
"transformers",
"gau alpha",
"torch",
"autotrain_compatible"
] | fill-mask | false | junnyu | null | junnyu/chinese_GAU-alpha-char_L-24_H-768 | 4 | 1 | transformers | 19,449 | ---
language: zh
tags:
- gau alpha
- torch
inference: False
---
# PyTorch code
https://github.com/JunnYu/GAU-alpha-pytorch
# bert4keras code
https://github.com/ZhuiyiTechnology/GAU-alpha
# Install
```bash
pip install git+https://github.com/JunnYu/GAU-alpha-pytorch.git
or
pip install gau_alpha
```
## Evaluation comparison
### CLUE-dev leaderboard classification results (base version)
| | iflytek | tnews | afqmc | cmnli | ocnli | wsc | csl |
| :-----: | :-----: | :---: | :---: | :---: | :---: | :---: | :---: |
| BERT | 60.06 | 56.80 | 72.41 | 79.56 | 73.93 | 78.62 | 83.93 |
| RoBERTa | 60.64 | 58.06 | 74.05 | **81.24** | 76.00 | 87.50 | 84.50 |
| RoFormer | 60.91 | 57.54 | 73.52 | 80.92 | 76.07 | 86.84 | 84.63 |
| RoFormerV2<sup>*</sup> | 60.87 | 56.54 | 72.75 | 80.34 | 75.36 | 80.92 | 84.67 |
| GAU-α | 61.41 | 57.76 | 74.17 | 81.82 | 75.86 | 79.93 | 85.67 |
| RoFormerV2-pytorch| **62.87** | **59.03** | **76.20** | 80.85 | **79.73** | **87.82** | **91.87** |
| GAU-α-pytorch(Adafactor) | 61.18 | 57.52 | 73.42 | 80.91 | 75.69 | 80.59 | 85.5 |
| GAU-α-pytorch(AdamW wd0.01 warmup0.1) | 60.68 | 57.95 | 73.08 | 81.02 | 75.36 | 81.25 | 83.93 |
### CLUE-test leaderboard classification results (base version)
| | iflytek | tnews | afqmc | cmnli | ocnli | wsc | csl |
| :-----: | :-----: | :---: | :---: | :---: | :---: | :---: | :---: |
| RoFormerV2-pytorch | **63.15** | **58.24** | **75.42** | **80.59** | **74.17** | **83.79** | 83.73 |
| GAU-α-pytorch(Adafactor) | 61.38 | 57.08 | 74.05 | 80.37 | 73.53 | 74.83 | **85.6** |
| GAU-α-pytorch(AdamW wd0.01 warmup0.1) | 60.54 | 57.67 | 72.44 | 80.32 | 72.97 | 76.55 | 84.13 |
### CLUE-dev leaderboard reading comprehension and NER results
| | cmrc2018 | c3 | chid | cluener |
| :-----: | :-----: | :---: | :---: | :---: |
| BERT | 56.17 | 60.54 | 85.69 | 79.45 |
| RoBERTa | 56.54 | 67.66 | 86.71 | 79.47 |
| RoFormer | 56.26 | 67.24 | 86.57 | 79.72 |
| RoFormerV2<sup>*</sup> | 57.91 | 64.62 | 85.09 | **81.08** |
| GAU-α | **58.09** | **68.24** | **87.91** | 80.01 |
### Notes:
- RoFormerV2<sup>*</sup> denotes a RoFormerV2 model trained without multi-task learning; that model has not been open-sourced by Jianlin Su (thanks to him for pointing this out).
- Results without the pytorch suffix are copied from the [GAU-alpha](https://github.com/ZhuiyiTechnology/GAU-alpha) repository.
- Results with the pytorch suffix were obtained from my own training runs.
# Usage
```python
import torch
from gau_alpha import GAUAlphaForMaskedLM, GAUAlphaTokenizer
text = "今天[MASK]很好,我[MASK]去公园玩。"
tokenizer = GAUAlphaTokenizer.from_pretrained(
"junnyu/chinese_GAU-alpha-char_L-24_H-768"
)
pt_model = GAUAlphaForMaskedLM.from_pretrained(
"junnyu/chinese_GAU-alpha-char_L-24_H-768"
)
pt_inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).logits[0]
pt_outputs_sentence = "pytorch: "
for i, id in enumerate(tokenizer.encode(text)):
if id == tokenizer.mask_token_id:
val, idx = pt_outputs[i].softmax(-1).topk(k=5)
tokens = tokenizer.convert_ids_to_tokens(idx)
new_tokens = []
for v, t in zip(val.cpu(), tokens):
new_tokens.append(f"{t}+{round(v.item(),4)}")
pt_outputs_sentence += "[" + "||".join(new_tokens) + "]"
else:
pt_outputs_sentence += "".join(
tokenizer.convert_ids_to_tokens([id], skip_special_tokens=True)
)
print(pt_outputs_sentence)
# pytorch: 今天[天+0.8657||气+0.0535||阳+0.0165||,+0.0126||晴+0.0111]很好,我[要+0.4619||想+0.4352||又+0.0252||就+0.0157||跑+0.0064]去公园玩。
```
# Reference
Bibtex:
```tex
@techreport{gau-alpha,
title={GAU-α: GAU-based Transformers for NLP - ZhuiyiAI},
author={Jianlin Su, Shengfeng Pan, Bo Wen, Yunfeng Liu},
year={2022},
url="https://github.com/ZhuiyiTechnology/GAU-alpha",
}
```
|
bdickson/distilbert-base-uncased-finetuned-cola | a6cb8aaa0f144080b7d17bf0b7d5141de235d45f | 2022-04-22T16:41:56.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers"
] | text-classification | false | bdickson | null | bdickson/distilbert-base-uncased-finetuned-cola | 4 | null | transformers | 19,450 | Entry not found |
TahaRazzaq/wav2vec2-base-urdu-demo-colab | 3814b806763134bf62a0b5b2b9b63b5467a04738 | 2022-04-23T02:50:59.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | TahaRazzaq | null | TahaRazzaq/wav2vec2-base-urdu-demo-colab | 4 | null | transformers | 19,451 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-urdu-demo-colab
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-urdu-demo-colab
This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.10.3
|
juancavallotti/bert-base-culinary | 289d0822305ad13f79aaa73224ff5f002affaa07 | 2022-04-23T02:37:22.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | juancavallotti | null | juancavallotti/bert-base-culinary | 4 | null | transformers | 19,452 | Entry not found |
anshr/distilgpt2_reward_model_01 | fa0b88b5bcd07f690a4afded0818e1e477656f6f | 2022-04-23T15:43:07.000Z | [
"pytorch",
"gpt2",
"text-classification",
"transformers"
] | text-classification | false | anshr | null | anshr/distilgpt2_reward_model_01 | 4 | null | transformers | 19,453 | Entry not found |
marksverdhei/t5-deshuffle | 4c6e94e82c1e9ce99b523ba29ae9075b12c5c44b | 2022-04-25T11:10:52.000Z | [
"pytorch",
"t5",
"text2text-generation",
"en",
"dataset:stas/c4-en-10k",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | marksverdhei | null | marksverdhei/t5-deshuffle | 4 | 1 | transformers | 19,454 | ---
language: en
widget:
- text: ' brown dog fox jumped lazy over quick the the '
datasets:
- 'stas/c4-en-10k'
---
# T5-deshuffle
Bag Of Words (BOW) is a simple and typical encoding for making statistical models discover patterns in language.
However, BOW is a lossy compression that eliminates a very important feature of text: order.
This model is trained to learn the most probable order of an unordered token sequence,
using a subset of the c4 dataset, and can thus be seen as a "bag-of-words decoder".
Currently, it does not perform well. I'm planning to re-train on a larger subset of c4 later (after May).
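A sketch of how such (bag-of-words, original) training pairs can be built, assuming, as the widget example above suggests, that the "bag" is simply the sentence's tokens in sorted order:

```python
def make_deshuffle_pair(sentence: str):
    # Source: the tokens in alphabetical order (a lossy bag-of-words view of the sentence).
    # Target: the original, correctly ordered sentence.
    tokens = sentence.split()
    source = " " + " ".join(sorted(tokens)) + " "
    return source, sentence

src, tgt = make_deshuffle_pair("the quick brown fox jumped over the lazy dog")
# src == ' brown dog fox jumped lazy over quick the the ', matching the widget example above
```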
How to run:
```python
from transformers import T5ForConditionalGeneration, T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained("marksverdhei/t5-deshuffle")
model = T5ForConditionalGeneration.from_pretrained("marksverdhei/t5-deshuffle")
prompt = ' brown dog fox jumped lazy over quick the the '
ids = tokenizer(prompt, return_tensors="pt").input_ids
generated_tokens, = model.generate(ids)
print(tokenizer.decode(generated_tokens, skip_special_tokens=True))
``` |
akumar33/ManuBERT | 96c0397e8ce5ac128fba98575ef5ac5cfc568494 | 2022-04-24T00:06:14.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | akumar33 | null | akumar33/ManuBERT | 4 | null | transformers | 19,455 | Entry not found |
domenicrosati/t5-small-finetuned-contradiction-local-test | 40983810c1b75676ac4258f071a09298f105b26b | 2022-04-24T01:22:29.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:snli",
"transformers",
"summarization",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | summarization | false | domenicrosati | null | domenicrosati/t5-small-finetuned-contradiction-local-test | 4 | null | transformers | 19,456 | ---
license: apache-2.0
tags:
- summarization
- generated_from_trainer
datasets:
- snli
model-index:
- name: t5-small-finetuned-contradiction-local-test
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-contradiction-local-test
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the snli dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5.6e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|
| No log | 1.0 | 405 | 2.5110 | 23.4004 | 8.9397 | 20.9541 | 21.5922 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
|
Felix92/doctr-torch-crnn-mobilenet-v3-large-french | c2767fc5bb6bf71baef483789563ea28dbce65d2 | 2022-05-25T21:31:24.000Z | [
"pytorch",
"en",
"fr",
"transformers",
"image-to-text"
] | image-to-text | false | Felix92 | null | Felix92/doctr-torch-crnn-mobilenet-v3-large-french | 4 | null | transformers | 19,457 |
---
pipeline_tag: image-to-text
language:
- en
- fr
---
<p align="center">
<img src="https://github.com/mindee/doctr/releases/download/v0.3.1/Logo_doctr.gif" width="60%">
</p>
**Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
## Task: recognition
https://github.com/mindee/doctr
### Example usage:
```python
>>> from doctr.io import DocumentFile
>>> from doctr.models import ocr_predictor, from_hub
>>> img = DocumentFile.from_images(['<image_path>'])
>>> # Load your model from the hub
>>> model = from_hub('mindee/my-model')
>>> # Pass it to the predictor
>>> # If your model is a recognition model:
>>> predictor = ocr_predictor(det_arch='db_mobilenet_v3_large',
>>> reco_arch=model,
>>> pretrained=True)
>>> # If your model is a detection model:
>>> predictor = ocr_predictor(det_arch=model,
>>> reco_arch='crnn_mobilenet_v3_small',
>>> pretrained=True)
>>> # Get your predictions
>>> res = predictor(img)
```
|
Hate-speech-CNERG/kannada-codemixed-abusive-MuRIL | 04fe9efb5d37aab77b0e0477198960a48fef8063 | 2022-05-03T08:48:39.000Z | [
"pytorch",
"bert",
"text-classification",
"ka-en",
"arxiv:2204.12543",
"transformers",
"license:afl-3.0"
] | text-classification | false | Hate-speech-CNERG | null | Hate-speech-CNERG/kannada-codemixed-abusive-MuRIL | 4 | null | transformers | 19,458 | ---
language: ka-en
license: afl-3.0
---
This model is used to detect **abusive speech** in **code-mixed Kannada**. It is fine-tuned from the MuRIL model using a code-mixed Kannada abusive speech dataset.
The model is trained with a learning rate of 2e-5. The training code can be found at this [url](https://github.com/hate-alert/IndicAbusive).
LABEL_0 :-> Normal
LABEL_1 :-> Abusive
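A minimal usage sketch with the Transformers pipeline API (the input sentence is a placeholder; the labels map to the meanings listed above):

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="Hate-speech-CNERG/kannada-codemixed-abusive-MuRIL",
)
print(classifier("your code-mixed Kannada sentence here"))  # e.g. [{'label': 'LABEL_0', 'score': 0.98}]
```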
### For more details about our paper
Mithun Das, Somnath Banerjee and Animesh Mukherjee. "[Data Bootstrapping Approaches to Improve Low Resource Abusive Language Detection for Indic Languages](https://arxiv.org/abs/2204.12543)". Accepted at ACM HT 2022.
***Please cite our paper in any published work that uses any of these resources.***
~~~
@article{das2022data,
title={Data Bootstrapping Approaches to Improve Low Resource Abusive Language Detection for Indic Languages},
author={Das, Mithun and Banerjee, Somnath and Mukherjee, Animesh},
journal={arXiv preprint arXiv:2204.12543},
year={2022}
}
~~~ |
crcb/carer_new | 7832ed1faddff6dd0efdba4aae26518041816b8d | 2022-04-25T08:08:42.000Z | [
"pytorch",
"roberta",
"text-classification",
"unk",
"dataset:crcb/autotrain-data-carer_new",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | crcb | null | crcb/carer_new | 4 | null | transformers | 19,459 | ---
tags: autotrain
language: unk
widget:
- text: "I love AutoTrain 🤗"
datasets:
- crcb/autotrain-data-carer_new
co2_eq_emissions: 3.9861818439722594
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 781623992
- CO2 Emissions (in grams): 3.9861818439722594
## Validation Metrics
- Loss: 0.1639203429222107
- Accuracy: 0.9389179755671903
- Macro F1: 0.9055551236566716
- Micro F1: 0.9389179755671903
- Weighted F1: 0.9379300009988988
- Macro Precision: 0.9466951148514304
- Micro Precision: 0.9389179755671903
- Weighted Precision: 0.9435523016000105
- Macro Recall: 0.8818551804621082
- Micro Recall: 0.9389179755671903
- Weighted Recall: 0.9389179755671903
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crcb/autotrain-carer_new-781623992
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crcb/autotrain-carer_new-781623992", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crcb/autotrain-carer_new-781623992", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
SophieTr/PP0_rm_v1_full | da8eeffa63586d38cfc45ba27df6dfa18788bf81 | 2022-04-28T16:51:27.000Z | [
"pytorch",
"pegasus",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | SophieTr | null | SophieTr/PP0_rm_v1_full | 4 | null | transformers | 19,460 | Entry not found |
Ghost1/distilbert-base-uncased-finetuned2-imdb | 7455b1ee07d4294930c3cb37a826783980a3cefa | 2022-04-26T12:40:59.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"fill-mask",
"dataset:imdb",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | fill-mask | false | Ghost1 | null | Ghost1/distilbert-base-uncased-finetuned2-imdb | 4 | null | transformers | 19,461 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imdb
model-index:
- name: distilbert-base-uncased-finetuned2-imdb
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned2-imdb
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset.
It achieves the following results on the evaluation set:
- Loss: 2.4725
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.707 | 1.0 | 157 | 2.4883 |
| 2.5761 | 2.0 | 314 | 2.4229 |
| 2.5255 | 3.0 | 471 | 2.4355 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
James-kc-min/SE_Roberta2 | f8287bd2ec79150d34127b6e7d54665329629190 | 2022-04-28T16:12:07.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | false | James-kc-min | null | James-kc-min/SE_Roberta2 | 4 | null | transformers | 19,462 | Entry not found |
anshr/distilgpt2_trained_policy_model_01 | 5065f58740a776fa1655dd424f6c8b97b664a610 | 2022-04-25T21:33:30.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | anshr | null | anshr/distilgpt2_trained_policy_model_01 | 4 | null | transformers | 19,463 | Entry not found |
anshr/distilgpt2_reward_model_04 | 147f7b758e75e74de7b0a3d20e6306f7c94d8fa5 | 2022-04-26T03:48:09.000Z | [
"pytorch",
"gpt2",
"text-classification",
"transformers"
] | text-classification | false | anshr | null | anshr/distilgpt2_reward_model_04 | 4 | null | transformers | 19,464 | Entry not found |
crcb/carer_5way | ca2f910d0e7a5b9ce8a700f4dade77e2d20e14e5 | 2022-04-26T05:46:33.000Z | [
"pytorch",
"roberta",
"text-classification",
"unk",
"dataset:crcb/autotrain-data-carer_5way",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | crcb | null | crcb/carer_5way | 4 | null | transformers | 19,465 | ---
tags: autotrain
language: unk
widget:
- text: "I love AutoTrain 🤗"
datasets:
- crcb/autotrain-data-carer_5way
co2_eq_emissions: 4.164757528958762
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 786524275
- CO2 Emissions (in grams): 4.164757528958762
## Validation Metrics
- Loss: 0.16724252700805664
- Accuracy: 0.944234404536862
- Macro F1: 0.9437256923758108
- Micro F1: 0.9442344045368619
- Weighted F1: 0.9442368364749825
- Macro Precision: 0.9431692663638349
- Micro Precision: 0.944234404536862
- Weighted Precision: 0.9446229335037916
- Macro Recall: 0.9446884750469657
- Micro Recall: 0.944234404536862
- Weighted Recall: 0.944234404536862
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crcb/autotrain-carer_5way-786524275
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crcb/autotrain-carer_5way-786524275", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crcb/autotrain-carer_5way-786524275", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
Isobutylcyclopentane/2022-055109-finetuned-eurosat | 63955dc7485acb1085301e5b78a1598e01dbae79 | 2022-04-26T07:19:39.000Z | [
"pytorch",
"tensorboard",
"perceiver",
"image-classification",
"transformers"
] | image-classification | false | Isobutylcyclopentane | null | Isobutylcyclopentane/2022-055109-finetuned-eurosat | 4 | null | transformers | 19,466 | Entry not found |
cynthiachan/procedure_classification_bert | efbb797f1ce49b7893c98bbe472509b68a20a507 | 2022-04-26T06:40:53.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | cynthiachan | null | cynthiachan/procedure_classification_bert | 4 | null | transformers | 19,467 | Entry not found |
scasutt/wav2vec2-large-xlsr-53_full_random_noise_01 | 7966a67f89cdc657ec60afcdf142a5ecd46bb178 | 2022-04-27T15:08:14.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | scasutt | null | scasutt/wav2vec2-large-xlsr-53_full_random_noise_01 | 4 | null | transformers | 19,468 | Entry not found |
Cheatham/xlm-roberta-large-finetuned-dAB-002 | 27cea11363bb5c5d7ad953d532cb90ed43afc6fa | 2022-04-26T07:51:59.000Z | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
] | text-classification | false | Cheatham | null | Cheatham/xlm-roberta-large-finetuned-dAB-002 | 4 | null | transformers | 19,469 | Entry not found |
IneG/glue_sst_classifier | dfafebd57cab187bd9a4c0d0c024c1e8a9afaeb0 | 2022-04-26T11:44:29.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | IneG | null | IneG/glue_sst_classifier | 4 | null | transformers | 19,470 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- f1
- accuracy
model-index:
- name: glue_sst_classifier
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
args: sst2
metrics:
- name: F1
type: f1
value: 0.9033707865168539
- name: Accuracy
type: accuracy
value: 0.9013761467889908
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# glue_sst_classifier
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2359
- F1: 0.9034
- Accuracy: 0.9014
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:------:|:--------:|
| 0.3653 | 0.19 | 100 | 0.3213 | 0.8717 | 0.8727 |
| 0.291 | 0.38 | 200 | 0.2662 | 0.8936 | 0.8911 |
| 0.2239 | 0.57 | 300 | 0.2417 | 0.9081 | 0.9060 |
| 0.2306 | 0.76 | 400 | 0.2359 | 0.9105 | 0.9094 |
| 0.2185 | 0.95 | 500 | 0.2371 | 0.9011 | 0.8991 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
dimboump/glue_sst_classifier | 4217bacc1d146ac7d7c3147e5d0ab15810eba9f4 | 2022-04-26T11:46:39.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | dimboump | null | dimboump/glue_sst_classifier | 4 | null | transformers | 19,471 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- f1
- accuracy
model-index:
- name: glue_sst_classifier
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
args: sst2
metrics:
- name: F1
type: f1
value: 0.9033707865168539
- name: Accuracy
type: accuracy
value: 0.9013761467889908
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# glue_sst_classifier
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2359
- F1: 0.9034
- Accuracy: 0.9014
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:------:|:--------:|
| 0.3653 | 0.19 | 100 | 0.3213 | 0.8717 | 0.8727 |
| 0.291 | 0.38 | 200 | 0.2662 | 0.8936 | 0.8911 |
| 0.2239 | 0.57 | 300 | 0.2417 | 0.9081 | 0.9060 |
| 0.2306 | 0.76 | 400 | 0.2359 | 0.9105 | 0.9094 |
| 0.2185 | 0.95 | 500 | 0.2371 | 0.9011 | 0.8991 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
MonaA/glue_sst_classifier_2 | 74d2f2b961c416e15e3c94143b491d092a7bbd35 | 2022-04-26T11:48:03.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | MonaA | null | MonaA/glue_sst_classifier_2 | 4 | null | transformers | 19,472 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- f1
- accuracy
model-index:
- name: glue_sst_classifier_2
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
args: sst2
metrics:
- name: F1
type: f1
value: 0.9033707865168539
- name: Accuracy
type: accuracy
value: 0.9013761467889908
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# glue_sst_classifier_2
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2359
- F1: 0.9034
- Accuracy: 0.9014
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:------:|:--------:|
| 0.3653 | 0.19 | 100 | 0.3213 | 0.8717 | 0.8727 |
| 0.291 | 0.38 | 200 | 0.2662 | 0.8936 | 0.8911 |
| 0.2239 | 0.57 | 300 | 0.2417 | 0.9081 | 0.9060 |
| 0.2306 | 0.76 | 400 | 0.2359 | 0.9105 | 0.9094 |
| 0.2185 | 0.95 | 500 | 0.2371 | 0.9011 | 0.8991 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
Caroline-Vandyck/glue_sst_classifier | 358858871dcbf9a03afb6c3cd8dad1b3b6213c7a | 2022-04-26T12:18:44.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | Caroline-Vandyck | null | Caroline-Vandyck/glue_sst_classifier | 4 | null | transformers | 19,473 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- f1
- accuracy
model-index:
- name: glue_sst_classifier
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
args: sst2
metrics:
- name: F1
type: f1
value: 0.9033707865168539
- name: Accuracy
type: accuracy
value: 0.9013761467889908
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# glue_sst_classifier
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2359
- F1: 0.9034
- Accuracy: 0.9014
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:------:|:--------:|
| 0.3653 | 0.19 | 100 | 0.3213 | 0.8717 | 0.8727 |
| 0.291 | 0.38 | 200 | 0.2662 | 0.8936 | 0.8911 |
| 0.2239 | 0.57 | 300 | 0.2417 | 0.9081 | 0.9060 |
| 0.2306 | 0.76 | 400 | 0.2359 | 0.9105 | 0.9094 |
| 0.2185 | 0.95 | 500 | 0.2371 | 0.9011 | 0.8991 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
corvusMidnight/glue_sst_classifier_ | e57087018a6435c49598feb181a0e38162eac736 | 2022-04-26T12:55:11.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | corvusMidnight | null | corvusMidnight/glue_sst_classifier_ | 4 | null | transformers | 19,474 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- f1
- accuracy
model-index:
- name: glue_sst_classifier_
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
args: sst2
metrics:
- name: F1
type: f1
value: 0.9033707865168539
- name: Accuracy
type: accuracy
value: 0.9013761467889908
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# glue_sst_classifier_
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2359
- F1: 0.9034
- Accuracy: 0.9014
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:------:|:--------:|
| 0.3653 | 0.19 | 100 | 0.3213 | 0.8717 | 0.8727 |
| 0.291 | 0.38 | 200 | 0.2662 | 0.8936 | 0.8911 |
| 0.2239 | 0.57 | 300 | 0.2417 | 0.9081 | 0.9060 |
| 0.2306 | 0.76 | 400 | 0.2359 | 0.9105 | 0.9094 |
| 0.2185 | 0.95 | 500 | 0.2371 | 0.9011 | 0.8991 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
anshr/distilgpt2_reward_model_05 | 8ac79f888d0dd28cf138331518e4280139c120a9 | 2022-04-26T21:44:56.000Z | [
"pytorch",
"gpt2",
"text-classification",
"transformers"
] | text-classification | false | anshr | null | anshr/distilgpt2_reward_model_05 | 4 | null | transformers | 19,475 | Entry not found |
Rem59/autotrain-Test_2-789524315 | 10cd8808be67e735c38575b0aaa1eb5c3fc1557d | 2022-04-26T19:11:30.000Z | [
"pytorch",
"camembert",
"text-classification",
"unk",
"dataset:Rem59/autotrain-data-Test_2",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | Rem59 | null | Rem59/autotrain-Test_2-789524315 | 4 | null | transformers | 19,476 | ---
tags: autotrain
language: unk
widget:
- text: "I love AutoTrain 🤗"
datasets:
- Rem59/autotrain-data-Test_2
co2_eq_emissions: 2.0134443204822188
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 789524315
- CO2 Emissions (in grams): 2.0134443204822188
## Validation Metrics
- Loss: 0.8042349815368652
- Accuracy: 0.6904761904761905
- Macro F1: 0.27230046948356806
- Micro F1: 0.6904761904761905
- Weighted F1: 0.5640509725016768
- Macro Precision: 0.23015873015873015
- Micro Precision: 0.6904761904761905
- Weighted Precision: 0.4767573696145125
- Macro Recall: 0.3333333333333333
- Micro Recall: 0.6904761904761905
- Weighted Recall: 0.6904761904761905
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/Rem59/autotrain-Test_2-789524315
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("Rem59/autotrain-Test_2-789524315", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("Rem59/autotrain-Test_2-789524315", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
obokkkk/opus-mt-ko-en-finetuned-en-to-ko | a8818ea08ad7b9b90ca7e849a404008c6363854e | 2022-04-27T06:01:35.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"dataset:kde4",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | obokkkk | null | obokkkk/opus-mt-ko-en-finetuned-en-to-ko | 4 | null | transformers | 19,477 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- kde4
metrics:
- bleu
model-index:
- name: opus-mt-ko-en-finetuned-en-to-ko
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: kde4
type: kde4
args: en-ko
metrics:
- name: Bleu
type: bleu
value: 17.4129
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# opus-mt-ko-en-finetuned-en-to-ko
This model is a fine-tuned version of [Helsinki-NLP/opus-mt-ko-en](https://huggingface.co/Helsinki-NLP/opus-mt-ko-en) on the kde4 dataset.
It achieves the following results on the evaluation set:
- Loss: 2.1606
- Bleu: 17.4129
- Gen Len: 10.8989
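A minimal usage sketch (not part of the auto-generated card); given the model name, the direction is assumed to be English-to-Korean, and the example sentence is illustrative only:

```python
from transformers import pipeline

# Load the fine-tuned Marian checkpoint as a translation pipeline.
translator = pipeline("translation", model="obokkkk/opus-mt-ko-en-finetuned-en-to-ko")

# Translate a short English sentence (UI-style text, in line with the kde4 training data).
print(translator("Open the file manager and select a folder.")[0]["translation_text"])
```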
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| 2.3645 | 1.0 | 3596 | 2.1606 | 17.4129 | 10.8989 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
aherzberg/wav2vec2-base-finetuned | 73bc5b24d22a5795f7997312c82c07158e578950 | 2022-05-01T22:13:10.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"audio-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | audio-classification | false | aherzberg | null | aherzberg/wav2vec2-base-finetuned | 4 | null | transformers | 19,478 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: wav2vec2-base-finetuned
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-finetuned
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5114
- Accuracy: 0.8383
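A minimal usage sketch (not part of the auto-generated card); the audio file path is a placeholder, and the label set depends on the unspecified training data:

```python
from transformers import pipeline

# Load the fine-tuned checkpoint as an audio-classification pipeline.
classifier = pipeline("audio-classification", model="aherzberg/wav2vec2-base-finetuned")

# "sample.wav" is a placeholder path; a 16 kHz mono recording is expected.
print(classifier("sample.wav"))
```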
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 1.5081 | 0.99 | 79 | 1.3270 | 0.5857 |
| 0.8949 | 1.99 | 158 | 0.8406 | 0.7412 |
| 0.6861 | 2.99 | 237 | 0.6829 | 0.7818 |
| 0.5477 | 3.99 | 316 | 0.6234 | 0.7942 |
| 0.4601 | 4.99 | 395 | 0.6184 | 0.8004 |
| 0.3969 | 5.99 | 474 | 0.5768 | 0.8039 |
| 0.3276 | 6.99 | 553 | 0.5441 | 0.8224 |
| 0.2975 | 7.99 | 632 | 0.5205 | 0.8295 |
| 0.2809 | 8.99 | 711 | 0.5204 | 0.8322 |
| 0.2315 | 9.99 | 790 | 0.5114 | 0.8383 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.5.0
- Datasets 1.14.0
- Tokenizers 0.10.3
|
UT/BMW_DEBIAS | 496a47c2778797e5e61a85e52dc918c2b868e49b | 2022-04-27T11:55:59.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | false | UT | null | UT/BMW_DEBIAS | 4 | null | transformers | 19,479 | Entry not found |
faisalahmad2/autotrain-nlp-text-summarization-by-faisal-793224456 | 3eb3e0a67d0093dae5e8936aed567f47335ec3fc | 2022-04-29T14:05:30.000Z | [
"pytorch",
"t5",
"text2text-generation",
"en",
"dataset:faisalahmad2/autotrain-data-nlp-text-summarization-by-faisal",
"transformers",
"autotrain",
"co2_eq_emissions",
"autotrain_compatible"
] | text2text-generation | false | faisalahmad2 | null | faisalahmad2/autotrain-nlp-text-summarization-by-faisal-793224456 | 4 | null | transformers | 19,480 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- faisalahmad2/autotrain-data-nlp-text-summarization-by-faisal
co2_eq_emissions: 27.26671996544415
---
# Model Trained Using AutoTrain
- Problem type: Summarization
- Model ID: 793224456
- CO2 Emissions (in grams): 27.26671996544415
## Validation Metrics
- Loss: 1.5189369916915894
- Rouge1: 38.7852
- Rouge2: 17.0785
- RougeL: 32.1082
- RougeLsum: 32.1103
- Gen Len: 18.7332
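Beyond the cURL call in the Usage section below, a minimal Python sketch (not part of the original card) using the `summarization` pipeline; the article text is a placeholder:

```python
from transformers import pipeline

# Load the AutoTrain T5 checkpoint as a summarization pipeline.
summarizer = pipeline("summarization", model="faisalahmad2/autotrain-nlp-text-summarization-by-faisal-793224456")

# Replace the placeholder with the document to summarize.
article = "Replace this placeholder with the article you want to summarize."
print(summarizer(article, max_length=60, min_length=10)[0]["summary_text"])
```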
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/faisalahmad2/autotrain-nlp-text-summarization-by-faisal-793224456
``` |
cassiepowell/LaBSE-for-similarity | 786d42a485b1cac1fdf2a3ca7f190f95078e280f | 2022-04-28T17:57:33.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | cassiepowell | null | cassiepowell/LaBSE-for-similarity | 4 | null | transformers | 19,481 | Entry not found |
caush/Clickbait3 | c1df78dd26b5acb27c65ecaf531188b231821968 | 2022-04-28T02:06:02.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
] | text-classification | false | caush | null | caush/Clickbait3 | 4 | null | transformers | 19,482 | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: Clickbait3
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Clickbait3
This model is a fine-tuned version of [microsoft/Multilingual-MiniLM-L12-H384](https://huggingface.co/microsoft/Multilingual-MiniLM-L12-H384) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0248
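A minimal usage sketch (not part of the auto-generated card); since the card only reports a validation loss, the interpretation and scale of the output score are assumptions to verify against the training setup:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the fine-tuned MiniLM checkpoint and score a headline.
tokenizer = AutoTokenizer.from_pretrained("caush/Clickbait3")
model = AutoModelForSequenceClassification.from_pretrained("caush/Clickbait3")

inputs = tokenizer("You won't believe what happened next", return_tensors="pt")
with torch.no_grad():
    print(model(**inputs).logits)  # raw output of the classification/regression head
```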
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 0.05 | 50 | 0.0373 |
| No log | 0.1 | 100 | 0.0320 |
| No log | 0.15 | 150 | 0.0295 |
| No log | 0.21 | 200 | 0.0302 |
| No log | 0.26 | 250 | 0.0331 |
| No log | 0.31 | 300 | 0.0280 |
| No log | 0.36 | 350 | 0.0277 |
| No log | 0.41 | 400 | 0.0316 |
| No log | 0.46 | 450 | 0.0277 |
| 0.0343 | 0.51 | 500 | 0.0276 |
| 0.0343 | 0.56 | 550 | 0.0282 |
| 0.0343 | 0.62 | 600 | 0.0280 |
| 0.0343 | 0.67 | 650 | 0.0271 |
| 0.0343 | 0.72 | 700 | 0.0264 |
| 0.0343 | 0.77 | 750 | 0.0265 |
| 0.0343 | 0.82 | 800 | 0.0260 |
| 0.0343 | 0.87 | 850 | 0.0263 |
| 0.0343 | 0.92 | 900 | 0.0259 |
| 0.0343 | 0.97 | 950 | 0.0277 |
| 0.0278 | 1.03 | 1000 | 0.0281 |
| 0.0278 | 1.08 | 1050 | 0.0294 |
| 0.0278 | 1.13 | 1100 | 0.0256 |
| 0.0278 | 1.18 | 1150 | 0.0258 |
| 0.0278 | 1.23 | 1200 | 0.0254 |
| 0.0278 | 1.28 | 1250 | 0.0265 |
| 0.0278 | 1.33 | 1300 | 0.0252 |
| 0.0278 | 1.38 | 1350 | 0.0251 |
| 0.0278 | 1.44 | 1400 | 0.0264 |
| 0.0278 | 1.49 | 1450 | 0.0262 |
| 0.023 | 1.54 | 1500 | 0.0272 |
| 0.023 | 1.59 | 1550 | 0.0278 |
| 0.023 | 1.64 | 1600 | 0.0255 |
| 0.023 | 1.69 | 1650 | 0.0258 |
| 0.023 | 1.74 | 1700 | 0.0262 |
| 0.023 | 1.79 | 1750 | 0.0250 |
| 0.023 | 1.85 | 1800 | 0.0253 |
| 0.023 | 1.9 | 1850 | 0.0271 |
| 0.023 | 1.95 | 1900 | 0.0248 |
| 0.023 | 2.0 | 1950 | 0.0258 |
| 0.0224 | 2.05 | 2000 | 0.0252 |
| 0.0224 | 2.1 | 2050 | 0.0259 |
| 0.0224 | 2.15 | 2100 | 0.0254 |
| 0.0224 | 2.21 | 2150 | 0.0260 |
| 0.0224 | 2.26 | 2200 | 0.0254 |
| 0.0224 | 2.31 | 2250 | 0.0266 |
| 0.0224 | 2.36 | 2300 | 0.0258 |
| 0.0224 | 2.41 | 2350 | 0.0258 |
| 0.0224 | 2.46 | 2400 | 0.0256 |
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.11.6
|
caush/Clickbait5 | 5c9585678047a7048bb501b420c0ea32e1cf0d98 | 2022-04-28T03:15:08.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers",
"generated_from_trainer",
"model-index"
] | text-classification | false | caush | null | caush/Clickbait5 | 4 | null | transformers | 19,483 | ---
tags:
- generated_from_trainer
model-index:
- name: Clickbait5
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Clickbait5
This model was trained from scratch on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0258
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 0.04 | 50 | 0.0258 |
| No log | 0.08 | 100 | 0.0269 |
| No log | 0.12 | 150 | 0.0259 |
| No log | 0.16 | 200 | 0.0260 |
| No log | 0.21 | 250 | 0.0267 |
| No log | 0.25 | 300 | 0.0276 |
| No log | 0.29 | 350 | 0.0284 |
| No log | 0.33 | 400 | 0.0270 |
| No log | 0.37 | 450 | 0.0269 |
| 0.0195 | 0.41 | 500 | 0.0260 |
| 0.0195 | 0.45 | 550 | 0.0284 |
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.11.6
|
oliverguhr/wav2vec2-large-xlsr-53-german-cv8 | 47ae8592c47551cd5da7dba65796e33486246cb5 | 2022-05-05T07:58:33.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"de",
"dataset:common_voice",
"transformers",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | oliverguhr | null | oliverguhr/wav2vec2-large-xlsr-53-german-cv8 | 4 | null | transformers | 19,484 | ---
language:
- de
license: apache-2.0
tags:
- automatic-speech-recognition
- mozilla-foundation/common_voice_8_0
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xlsr-53-german-cv8-dropout
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xlsr-53-german-cv8-dropout
This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - DE dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1111
- Wer: 0.1117
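A minimal usage sketch (not part of the auto-generated card); the audio path is a placeholder for a 16 kHz German recording:

```python
from transformers import pipeline

# Load the fine-tuned checkpoint as a German speech-recognition pipeline.
asr = pipeline("automatic-speech-recognition", model="oliverguhr/wav2vec2-large-xlsr-53-german-cv8")

# "aufnahme.wav" is a placeholder path.
print(asr("aufnahme.wav")["text"])
```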
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 10.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 0.2081 | 1.0 | 6815 | 0.1784 | 0.1910 |
| 0.1686 | 2.0 | 13630 | 0.1621 | 0.1725 |
| 0.1515 | 3.0 | 20445 | 0.1569 | 0.1649 |
| 0.1426 | 4.0 | 27260 | 0.1466 | 0.1681 |
| 0.135 | 5.0 | 34075 | 0.1357 | 0.1410 |
| 0.1093 | 6.0 | 40890 | 0.1313 | 0.1436 |
| 0.1 | 7.0 | 47705 | 0.1242 | 0.1250 |
| 0.0999 | 8.0 | 54520 | 0.1191 | 0.1218 |
| 0.084 | 9.0 | 61335 | 0.1134 | 0.1164 |
| 0.0752 | 10.0 | 68150 | 0.1111 | 0.1117 |
### Framework versions
- Transformers 4.19.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
|
classla/wav2vec2-large-slavic-parlaspeech-hr-lm | d89612cdc04ca1bb3d2f4bc54b2db91351160d3b | 2022-05-18T14:06:27.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"hr",
"dataset:parlaspeech-hr",
"transformers",
"audio",
"parlaspeech"
] | automatic-speech-recognition | false | classla | null | classla/wav2vec2-large-slavic-parlaspeech-hr-lm | 4 | null | transformers | 19,485 | ---
language: hr
datasets:
- parlaspeech-hr
tags:
- audio
- automatic-speech-recognition
- parlaspeech
widget:
- example_title: example 1
src: https://huggingface.co/classla/wav2vec2-xls-r-parlaspeech-hr/raw/main/1800.m4a
- example_title: example 2
src: https://huggingface.co/classla/wav2vec2-xls-r-parlaspeech-hr/raw/main/00020578b.flac.wav
- example_title: example 3
src: https://huggingface.co/classla/wav2vec2-xls-r-parlaspeech-hr/raw/main/00020570a.flac.wav
---
# wav2vec2-large-slavic-parlaspeech-hr-lm
This model for Croatian ASR is based on the [facebook/wav2vec2-large-slavic-voxpopuli-v2 model](https://huggingface.co/facebook/wav2vec2-large-slavic-voxpopuli-v2) and was fine-tuned with 300 hours of recordings and transcripts from the ASR Croatian parliament dataset [ParlaSpeech-HR v1.0](http://hdl.handle.net/11356/1494) and enhanced with a 5-gram language model based on the [ParlaMint dataset](http://hdl.handle.net/11356/1432).
If you use this model, please cite the following paper:
Nikola Ljubešić, Danijel Koržinek, Peter Rupnik, Ivo-Pavao Jazbec. ParlaSpeech-HR -- a freely available ASR dataset for Croatian bootstrapped from the ParlaMint corpus. Accepted at ParlaCLARIN@LREC.
## Metrics
Evaluation is performed on the dev and test portions of the [ParlaSpeech-HR v1.0](http://hdl.handle.net/11356/1494) dataset.
|split|CER|WER|
|---|---|---|
|dev|0.0253|0.0556|
|test|0.0188|0.0430|
## Usage in `transformers`
Tested with `transformers==4.18.0`, `torch==1.11.0`, and `SoundFile==0.10.3.post1`.
```python
from transformers import Wav2Vec2ProcessorWithLM, Wav2Vec2ForCTC
import soundfile as sf
import torch
import os
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# load model and tokenizer
processor = Wav2Vec2ProcessorWithLM.from_pretrained(
"classla/wav2vec2-large-slavic-parlaspeech-hr-lm")
model = Wav2Vec2ForCTC.from_pretrained("classla/wav2vec2-large-slavic-parlaspeech-hr-lm")
# download the example wav files:
os.system("wget https://huggingface.co/classla/wav2vec2-large-slavic-parlaspeech-hr-lm/raw/main/00020570a.flac.wav")
# read the wav file
speech, sample_rate = sf.read("00020570a.flac.wav")
# prepare model inputs (keeping everything on CPU so no GPU is required)
inputs = processor(speech, sampling_rate=sample_rate, return_tensors="pt")
with torch.no_grad():
logits = model(**inputs).logits
transcription = processor.batch_decode(logits.numpy()).text[0]
# remove the raw wav file
os.system("rm 00020570a.flac.wav")
transcription # 'velik broj poslovnih subjekata poslao je sa minusom velik dio'
```
## Training hyperparameters
In fine-tuning, the following arguments were used:
| arg | value |
|-------------------------------|-------|
| `per_device_train_batch_size` | 16 |
| `gradient_accumulation_steps` | 4 |
| `num_train_epochs` | 8 |
| `learning_rate` | 3e-4 |
| `warmup_steps` | 500 | |
aakarshan/autotrain-Question-translation-797524592 | 48aac34cfe416468fbc29d290a4a1c2d1bb532fe | 2022-04-28T14:48:38.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"en",
"hi",
"dataset:aakarshan/autotrain-data-Question-translation",
"transformers",
"autotrain",
"translation",
"co2_eq_emissions",
"autotrain_compatible"
] | translation | false | aakarshan | null | aakarshan/autotrain-Question-translation-797524592 | 4 | null | transformers | 19,486 | ---
tags:
- autotrain
- translation
language:
- en
- hi
datasets:
- aakarshan/autotrain-data-Question-translation
co2_eq_emissions: 27.564419884224776
---
# Model Trained Using AutoTrain
- Problem type: Translation
- Model ID: 797524592
- CO2 Emissions (in grams): 27.564419884224776
## Validation Metrics
- Loss: 2.2697999477386475
- SacreBLEU: 14.9797
- Gen len: 13.7071 |
juancavallotti/roberta-base-culinary-finetuned | 6fecab9a9ee91005fb67d36a22a556423ced01d6 | 2022-04-28T17:42:59.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers",
"generated_from_trainer",
"model-index"
] | text-classification | false | juancavallotti | null | juancavallotti/roberta-base-culinary-finetuned | 4 | null | transformers | 19,487 | ---
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: roberta-base-culinary-finetuned
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-culinary-finetuned
This model was trained from scratch on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0657
- F1: 0.9929
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 0.1803 | 0.11 | 500 | 0.1939 | 0.9611 |
| 0.1543 | 0.22 | 1000 | 0.1364 | 0.9669 |
| 0.1213 | 0.32 | 1500 | 0.1487 | 0.9728 |
| 0.1079 | 0.43 | 2000 | 0.0855 | 0.9773 |
| 0.0975 | 0.54 | 2500 | 0.0844 | 0.9831 |
| 0.0855 | 0.65 | 3000 | 0.0785 | 0.9831 |
| 0.0844 | 0.76 | 3500 | 0.0679 | 0.9857 |
| 0.0793 | 0.86 | 4000 | 0.0489 | 0.9890 |
| 0.0864 | 0.97 | 4500 | 0.0399 | 0.9903 |
| 0.049 | 1.08 | 5000 | 0.0528 | 0.9890 |
| 0.0353 | 1.19 | 5500 | 0.0635 | 0.9877 |
| 0.0321 | 1.3 | 6000 | 0.0542 | 0.9903 |
| 0.0311 | 1.41 | 6500 | 0.0559 | 0.9896 |
| 0.0315 | 1.51 | 7000 | 0.0736 | 0.9857 |
| 0.04 | 1.62 | 7500 | 0.0648 | 0.9909 |
| 0.0265 | 1.73 | 8000 | 0.0608 | 0.9909 |
| 0.0443 | 1.84 | 8500 | 0.0617 | 0.9883 |
| 0.0443 | 1.95 | 9000 | 0.0555 | 0.9896 |
| 0.0235 | 2.05 | 9500 | 0.0608 | 0.9903 |
| 0.0139 | 2.16 | 10000 | 0.0613 | 0.9922 |
| 0.0126 | 2.27 | 10500 | 0.0739 | 0.9903 |
| 0.0164 | 2.38 | 11000 | 0.0679 | 0.9903 |
| 0.0172 | 2.49 | 11500 | 0.0606 | 0.9922 |
| 0.0175 | 2.59 | 12000 | 0.0442 | 0.9942 |
| 0.01 | 2.7 | 12500 | 0.0661 | 0.9916 |
| 0.0059 | 2.81 | 13000 | 0.0659 | 0.9929 |
| 0.0216 | 2.92 | 13500 | 0.0504 | 0.9929 |
| 0.0123 | 3.03 | 14000 | 0.0584 | 0.9929 |
| 0.0047 | 3.14 | 14500 | 0.0573 | 0.9929 |
| 0.0123 | 3.24 | 15000 | 0.0511 | 0.9935 |
| 0.0027 | 3.35 | 15500 | 0.0579 | 0.9942 |
| 0.0025 | 3.46 | 16000 | 0.0602 | 0.9935 |
| 0.0051 | 3.57 | 16500 | 0.0598 | 0.9935 |
| 0.0044 | 3.68 | 17000 | 0.0617 | 0.9929 |
| 0.0061 | 3.78 | 17500 | 0.0634 | 0.9935 |
| 0.0048 | 3.89 | 18000 | 0.0672 | 0.9929 |
| 0.0078 | 4.0 | 18500 | 0.0657 | 0.9929 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
Raffay/wav2vec-urdu-asr-project | 331221ecd020ea7837ce8ad77df166f4da58fec7 | 2022-05-02T13:07:41.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Raffay | null | Raffay/wav2vec-urdu-asr-project | 4 | null | transformers | 19,488 | Entry not found |
UT/PARSBRT_DEBIAS | 5316be0053e24c3446dcdbe86ec8f55d987ce7b9 | 2022-04-28T22:33:37.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | false | UT | null | UT/PARSBRT_DEBIAS | 4 | null | transformers | 19,489 | Entry not found |
shahidul034/sentence_equivalent_check | 79e61fc49b56384529581c45317b04ae8d9ae29f | 2022-04-30T11:28:04.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | shahidul034 | null | shahidul034/sentence_equivalent_check | 4 | 1 | transformers | 19,490 | This model helps to check whether two sentences are semantically equivalent.
==> Python 3.8 works for the transformers installation
-->pip install git+https://github.com/huggingface/transformers
-->python -m pip install jupyter
-->pip install torch==1.5.0 -f https://download.pytorch.org/whl/torch_stable.html
-->pip install tensorflow-gpu
How to create a virtual environment:
Main tutorial: https://www.datacamp.com/community/tutorials/virtual-environment-in-python
https://www.geeksforgeeks.org/set-up-virtual-environment-for-python-using-anaconda/
# Creating a new Virtual Environment.
The following command uses the '-n' flag to create a new environment named 'env' with a specific Python version (3.6 in the command below).
-->conda create -n env python=3.6
Activating the Virtual Environment.
The command below activates the Virtual Environment; the prompt changes to show '(env)' in parentheses.
-->conda activate env
Install the required package.
For example, the following installs the 'numpy' package into the 'env' Virtual Environment.
-->conda install -n env numpy
Listing all of the installed packages inside a Virtual Environment.
The following command lists the packages installed in the Virtual Environment. -->conda list
Listing out all of the created Virtual Environment.
All of the environments created will be listed by the following command.
-->conda env list
Deactivating the Virtual Environment.
The following command will deactivate the current environment 'env' and will change to 'base'.
-->conda deactivate
Removing the Virtual Environment.
The following command removes the 'myenv' Virtual Environment with all its packages at the same time.
-->conda env remove -n myenv
Install a Jupyter kernel for the virtual environment using the following command:
Running the following command will create a kernel that can be used to run jupyter notebook commands inside the virtual environment.
-->ipython kernel install --user --name=venv
Select the installed kernel when you want to use jupyter notebook in this virtual environment.
The kernel now appears in the list of available kernels, so the Jupyter notebook can keep its own dependencies and stay organized. Once you are done with the project and no longer need the kernel, you can uninstall it by running the following command:
-->jupyter-kernelspec uninstall venv
Tutorial:geeksforgeeks.org/using-jupyter-notebook-in-virtual-environment/
==> Sometimes Jupyter Notebook needs to be installed separately
-->python -m pip install jupyter
==> Open Jupyter Notebook from the Anaconda prompt while the virtual environment is on: type jupyter-notebook in the Anaconda prompt (the environment must be activated)
(https://stackoverflow.com/questions/42449814/running-jupyter-notebook-in-a-virtualenv-installed-sklearn-module-not-available)
==> To install PyTorch, follow this tutorial
(https://stackoverflow.com/questions/57499002/cant-install-pytorch-with-pip-on-windows)
|
Rbanerjee/simpsons-character-discriminator | 71f924bfbf929808151d51bd8a115cfea94abe18 | 2022-04-28T21:52:49.000Z | [
"pytorch",
"distilbert",
"text-classification",
"transformers"
] | text-classification | false | Rbanerjee | null | Rbanerjee/simpsons-character-discriminator | 4 | null | transformers | 19,491 | Entry not found |
hippoarale/mT5_multilingual_XLSum-finetuned-th-wikilingua | a41bdc7e1c22c3e6f345c1710e067eac12267d66 | 2022-05-01T11:16:51.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | hippoarale | null | hippoarale/mT5_multilingual_XLSum-finetuned-th-wikilingua | 4 | null | transformers | 19,492 | ---
tags:
- generated_from_trainer
model-index:
- name: outputs
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# outputs
This model is a fine-tuned version of [csebuetnlp/mT5_multilingual_XLSum](https://huggingface.co/csebuetnlp/mT5_multilingual_XLSum) on an unknown dataset.
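A minimal usage sketch (not part of the auto-generated card); the Thai input text is a placeholder and the generation settings are illustrative, not the values used in training:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Summarize a Thai passage with the fine-tuned mT5 checkpoint.
model_id = "hippoarale/mT5_multilingual_XLSum-finetuned-th-wikilingua"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

text = "ใส่ข้อความภาษาไทยที่ต้องการสรุปไว้ที่นี่"  # placeholder Thai text
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
summary_ids = model.generate(**inputs, num_beams=4, max_length=84)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```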
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0005
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 250
- num_epochs: 3.0
### Training results
### Framework versions
- Transformers 4.19.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
shoubhik/electra_abbv | 932956dfba58836b05bc52c1b455e9cd4903f38a | 2022-04-29T11:15:57.000Z | [
"pytorch",
"electra",
"text-classification",
"transformers"
] | text-classification | false | shoubhik | null | shoubhik/electra_abbv | 4 | null | transformers | 19,493 | Entry not found |
UT/MULTIBRT | e1fd568cfd12491b4c7744afd4b1d4ede676adc8 | 2022-04-29T12:18:23.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | UT | null | UT/MULTIBRT | 4 | null | transformers | 19,494 | Entry not found |
bhuvi/super_nli | 3966d74e2e2dc286ff8b42dc811811754416451e | 2022-04-29T17:39:14.000Z | [
"pytorch",
"mpnet",
"feature-extraction",
"transformers"
] | feature-extraction | false | bhuvi | null | bhuvi/super_nli | 4 | null | transformers | 19,495 | Entry not found |
masapasa/deberta_amazon_reviews_v2 | d4c5ebde4ecf8f65baa25dff78ef8e1c7861031d | 2022-04-29T16:26:04.000Z | [
"pytorch",
"tensorboard",
"deberta-v2",
"text-classification",
"transformers"
] | text-classification | false | masapasa | null | masapasa/deberta_amazon_reviews_v2 | 4 | null | transformers | 19,496 | Entry not found |
mrm8488/data2vec-text-base-finetuned-mnli | 219587c87af530c91ced390ce1cdbf3b26d05cba | 2022-04-29T21:05:54.000Z | [
"pytorch",
"tensorboard",
"data2vec-text",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
] | text-classification | false | mrm8488 | null | mrm8488/data2vec-text-base-finetuned-mnli | 4 | null | transformers | 19,497 | ---
license: mit
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: data2vec-text-base-finetuned-mnli
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
args: mnli
metrics:
- name: Accuracy
type: accuracy
value: 0.7862455425369332
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# data2vec-text-base-finetuned-mnli
This model is a fine-tuned version of [facebook/data2vec-text-base](https://huggingface.co/facebook/data2vec-text-base) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5521
- Accuracy: 0.7862
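A minimal usage sketch (not part of the auto-generated card) for scoring a premise/hypothesis pair; the order of the entailment/neutral/contradiction classes depends on the saved label mapping:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the MNLI fine-tuned data2vec checkpoint.
model_id = "mrm8488/data2vec-text-base-finetuned-mnli"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("A man is playing a guitar on stage.", "Someone is performing music.", return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)
```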
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:------:|:---------------:|:--------:|
| 1.099 | 1.0 | 24544 | 1.0987 | 0.3182 |
| 1.0993 | 2.0 | 49088 | 1.0979 | 0.3545 |
| 0.7481 | 3.0 | 73632 | 0.7197 | 0.7046 |
| 0.5671 | 4.0 | 98176 | 0.5862 | 0.7728 |
| 0.5505 | 5.0 | 122720 | 0.5521 | 0.7862 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
obokkkk/mt5-base_2_3 | 7d698b01a36f750c88af116300d927a64692a11a | 2022-05-01T11:36:51.000Z | [
"pytorch",
"tensorboard",
"mt5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | obokkkk | null | obokkkk/mt5-base_2_3 | 4 | null | transformers | 19,498 | ---
tags:
- generated_from_trainer
metrics:
- bleu
model-index:
- name: mt5-base_2_3
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mt5-base_2_3
This model is a fine-tuned version of [obokkkk/mt5-base_2](https://huggingface.co/obokkkk/mt5-base_2) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1465
- Bleu: 9.5474
- Gen Len: 17.854
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 64
- total_train_batch_size: 512
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| No log | 1.0 | 175 | 1.1739 | 9.0271 | 17.8543 |
| No log | 2.0 | 350 | 1.1660 | 9.1398 | 17.8468 |
| 1.3653 | 3.0 | 525 | 1.1585 | 9.251 | 17.8656 |
| 1.3653 | 4.0 | 700 | 1.1538 | 9.3176 | 17.8476 |
| 1.3653 | 5.0 | 875 | 1.1518 | 9.3529 | 17.8608 |
| 1.2985 | 6.0 | 1050 | 1.1505 | 9.4818 | 17.8552 |
| 1.2985 | 7.0 | 1225 | 1.1475 | 9.499 | 17.8575 |
| 1.2985 | 8.0 | 1400 | 1.1471 | 9.5511 | 17.871 |
| 1.2632 | 9.0 | 1575 | 1.1459 | 9.5315 | 17.8547 |
| 1.2632 | 10.0 | 1750 | 1.1465 | 9.5474 | 17.854 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
dyyyyyyyy/xTune_panx_XLM-RoBERTa-large | 8ca63b4ddda6ced690fc87a92df969384507bb8c | 2022-04-30T08:41:01.000Z | [
"pytorch",
"xlm-roberta",
"transformers"
] | null | false | dyyyyyyyy | null | dyyyyyyyy/xTune_panx_XLM-RoBERTa-large | 4 | null | transformers | 19,499 | Entry not found |