modelId | sha | lastModified | tags | pipeline_tag | private | author | config | id | downloads | likes | library_name | __index_level_0__ | readme |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Jeevesh8/std_pnt_04_feather_berts-21 | 9f54a2124a7be60769c99d545a9f861555c45216 | 2022-06-12T06:04:19.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-21 | 5 | null | transformers | 17,400 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-19 | 87e30f27a7ddf29b742f482f08649e5c6bff2485 | 2022-06-12T06:03:27.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-19 | 5 | null | transformers | 17,401 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-75 | d0a179d5f2a6435bc5339de95c505065ee94b685 | 2022-06-12T06:03:02.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-75 | 5 | null | transformers | 17,402 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-55 | 642b26f5ec85a837fb9e9c44afd430e2461f8751 | 2022-06-12T06:03:00.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-55 | 5 | null | transformers | 17,403 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-90 | 7c8ad68b19b68c9b340711e59ce734d6ef216e0c | 2022-06-12T06:03:07.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-90 | 5 | null | transformers | 17,404 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-20 | 0103d89586de016cc6b4458397852dde6581ee55 | 2022-06-12T06:04:20.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-20 | 5 | null | transformers | 17,405 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-86 | 86669402db1305a134cadd7374e3bec79d6a533c | 2022-06-12T06:03:03.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-86 | 5 | null | transformers | 17,406 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-84 | 193c6c8ed6a29be99792f9a418d08c52dd83d8df | 2022-06-12T06:02:59.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-84 | 5 | null | transformers | 17,407 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-74 | e561460d57dd9e3d7fc7bdde9c0228c3943dfdad | 2022-06-12T06:02:57.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-74 | 5 | null | transformers | 17,408 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-63 | 8fdbd2993ce0ae903d485390e1e5bdd37c9a0596 | 2022-06-12T06:03:07.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-63 | 5 | null | transformers | 17,409 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-56 | 74f66f2d865a6db37af6798cf7320f5d04ae08bb | 2022-06-12T06:03:07.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-56 | 5 | null | transformers | 17,410 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-13 | ac9e38605a14aa2407f961fe92a680ac0ae102ef | 2022-06-12T06:03:26.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-13 | 5 | null | transformers | 17,411 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-17 | 155678e155a803c6cea3d0914530e06ef2c02a09 | 2022-06-12T06:03:06.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-17 | 5 | null | transformers | 17,412 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-9 | 24208c3920dfad06abc780fdca47085bbe7209c8 | 2022-06-12T06:03:28.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-9 | 5 | null | transformers | 17,413 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-92 | e51ad1d9d9a8649334e2dcb0a70946904211edb1 | 2022-06-12T06:03:07.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-92 | 5 | null | transformers | 17,414 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-89 | b93e5e0036e4d69b27c0c92fc506bbc22e5b4b1e | 2022-06-12T06:03:01.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-89 | 5 | null | transformers | 17,415 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-40 | 2fd58ca0c31de38c7ce1adc3260e7a7cd15c8450 | 2022-06-12T06:03:02.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-40 | 5 | null | transformers | 17,416 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-73 | 6645be44bee99da54c8b31e67abec8eb67ebb1bf | 2022-06-12T06:03:06.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-73 | 5 | null | transformers | 17,417 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-38 | d288a70a976a10add9c055ed4b1ff56a7bfe1a0f | 2022-06-12T06:02:58.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-38 | 5 | null | transformers | 17,418 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-36 | 45fdeef68908d425889140f3a5a2089f9dba384c | 2022-06-12T06:04:08.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-36 | 5 | null | transformers | 17,419 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-22 | 4b683e7fcd9e967972f27e5a352f8cc823dd97df | 2022-06-12T06:03:02.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-22 | 5 | null | transformers | 17,420 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-77 | 3c3566f4b63ef6b1ed6d2bdf02e4d17b5f703448 | 2022-06-12T06:02:59.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-77 | 5 | null | transformers | 17,421 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-49 | 782f19b1a5f2e26f38e385b53673528f38d0434e | 2022-06-12T06:03:17.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-49 | 5 | null | transformers | 17,422 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-69 | bfd28f4f5b6926325995e5d770ec0b5eadfdbf63 | 2022-06-12T06:03:15.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-69 | 5 | null | transformers | 17,423 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-70 | 75a806abd62db6eb0062de7bc3d34dbc4bc3b413 | 2022-06-12T06:03:12.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-70 | 5 | null | transformers | 17,424 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-32 | 90f1ad7dae49f206f18a6cbb7e22da0278ee0d1f | 2022-06-12T06:03:10.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-32 | 5 | null | transformers | 17,425 | Entry not found |
Jeevesh8/std_pnt_04_feather_berts-98 | b5d9d0155cfbb1462dffa09f72ad1c5f7400e8f2 | 2022-06-12T06:05:52.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_pnt_04_feather_berts-98 | 5 | null | transformers | 17,426 | Entry not found |
kravchenko/uk-mt5-large | 80052b04cb7867a1a3a4898d9c5ef94b10985888 | 2022-06-12T15:00:46.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"uk",
"en",
"transformers",
"t5",
"autotrain_compatible"
] | text2text-generation | false | kravchenko | null | kravchenko/uk-mt5-large | 5 | null | transformers | 17,427 | ---
language:
- uk
- en
tags:
- t5
---
The aim is to compress the mT5-large model so that it covers only Ukrainian plus some basic English.
This reproduces a similar result (for a different language) to the one described in [this](https://towardsdatascience.com/how-to-adapt-a-multilingual-t5-model-for-a-single-language-b9f94f3d9c90) Medium article.
Results:
- 1.2B params -> 779M params (37%)
- 250K tokens -> 8900 tokens
- 4.6GB size model -> 2.9GB size model
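As a quick sanity check (not part of the original card), the compressed checkpoint can be loaded with the standard mT5/T5 classes and the reduced vocabulary and parameter count inspected directly; the snippet below is a minimal sketch.
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Minimal sketch (assumption: the checkpoint loads with the standard mT5/T5 Auto classes).
tokenizer = AutoTokenizer.from_pretrained("kravchenko/uk-mt5-large")
model = AutoModelForSeq2SeqLM.from_pretrained("kravchenko/uk-mt5-large")

# Check the compression claims directly.
print(len(tokenizer))                                    # ~8.9K tokens instead of 250K
print(sum(p.numel() for p in model.parameters()) / 1e6)  # ~779 (million params) instead of ~1200
```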
|
course5i/SEAD-L-6_H-384_A-12-mrpc | dd01b469315bfa69dff2e956085ce190030164ea | 2022-06-12T20:21:42.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"text-classification",
"en",
"dataset:glue",
"dataset:mrpc",
"arxiv:1910.01108",
"arxiv:1909.10351",
"arxiv:2002.10957",
"arxiv:1810.04805",
"arxiv:1804.07461",
"arxiv:1905.00537",
"transformers",
"SEAD",
"license:apache-2.0"
] | text-classification | false | course5i | null | course5i/SEAD-L-6_H-384_A-12-mrpc | 5 | null | transformers | 17,428 | ---
language:
- en
license: apache-2.0
tags:
- SEAD
datasets:
- glue
- mrpc
---
## Paper
## [SEAD: SIMPLE ENSEMBLE AND KNOWLEDGE DISTILLATION FRAMEWORK FOR NATURAL LANGUAGE UNDERSTANDING](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63)
Authors: *Moyan Mei*, *Rohit Sroch*
## Abstract
With the widespread use of pre-trained language models (PLM), there has been increased research on how to make them applicable, especially in limited-resource or low latency high throughput scenarios. One of the dominant approaches is knowledge distillation (KD), where a smaller model is trained by receiving guidance from a large PLM. While there are many successful designs for learning knowledge from teachers, it remains unclear how students can learn better. Inspired by real university teaching processes, in this work we further explore knowledge distillation and propose a very simple yet effective framework, SEAD, to further improve task-specific generalization by utilizing multiple teachers. Our experiments show that SEAD leads to better performance compared to other popular KD methods [[1](https://arxiv.org/abs/1910.01108)] [[2](https://arxiv.org/abs/1909.10351)] [[3](https://arxiv.org/abs/2002.10957)] and achieves comparable or superior performance to its teacher model such as BERT [[4](https://arxiv.org/abs/1810.04805)] on total 13 tasks for the GLUE [[5](https://arxiv.org/abs/1804.07461)] and SuperGLUE [[6](https://arxiv.org/abs/1905.00537)] benchmarks.
*Moyan Mei and Rohit Sroch. 2022. [SEAD: Simple ensemble and knowledge distillation framework for natural language understanding](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63).
Lattice, THE MACHINE LEARNING JOURNAL by Association of Data Scientists, 3(1).*
## SEAD-L-6_H-384_A-12-mrpc
This is a student model distilled from [**BERT base**](https://huggingface.co/bert-base-uncased) as teacher by using SEAD framework on **mrpc** task. For weights initialization, we used [microsoft/xtremedistil-l6-h384-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h384-uncased)
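For illustration only (not from the original card), the distilled checkpoint can be used for paraphrase detection by encoding the two sentences together; the example pair below is an assumption.
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Minimal sketch: MRPC is a sentence-pair (paraphrase) task, so both sentences
# are encoded together. The example sentences are illustrative assumptions.
model_id = "course5i/SEAD-L-6_H-384_A-12-mrpc"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("The company posted strong quarterly earnings.",
                   "Quarterly earnings at the firm were strong.",
                   return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)  # class probabilities (label mapping depends on the checkpoint config)
```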
## All SEAD Checkpoints
Other Community Checkpoints: [here](https://huggingface.co/models?search=SEAD)
## Intended uses & limitations
More information needed
### Training hyperparameters
Please take a look at the `training_args.bin` file
```python
import torch

hyperparameters = torch.load('training_args.bin')
```
### Evaluation results
| eval_accuracy | eval_f1 | eval_runtime | eval_samples_per_second | eval_steps_per_second | eval_loss | eval_samples |
|:-------------:|:-------:|:------------:|:-----------------------:|:---------------------:|:---------:|:------------:|
| 0.9093 | 0.9345 | 1.1947 | 341.494 | 10.881 | 0.4309 | 408 |
### Framework versions
- Transformers >=4.8.0
- Pytorch >=1.6.0
- TensorFlow >=2.5.0
- Flax >=0.3.5
- Datasets >=1.10.2
- Tokenizers >=0.11.6
If you use these models, please cite the following paper:
```
@article{article,
author={Mei, Moyan and Sroch, Rohit},
title={SEAD: Simple Ensemble and Knowledge Distillation Framework for Natural Language Understanding},
volume={3},
number={1},
journal={Lattice, The Machine Learning Journal by Association of Data Scientists},
day={26},
year={2022},
month={Feb},
url = {www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63}
}
```
|
course5i/SEAD-L-6_H-256_A-8-mrpc | 6341b3fe95529ee67705954ee0268e24d929597b | 2022-06-12T20:35:41.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"text-classification",
"en",
"dataset:glue",
"dataset:mrpc",
"arxiv:1910.01108",
"arxiv:1909.10351",
"arxiv:2002.10957",
"arxiv:1810.04805",
"arxiv:1804.07461",
"arxiv:1905.00537",
"transformers",
"SEAD",
"license:apache-2.0"
] | text-classification | false | course5i | null | course5i/SEAD-L-6_H-256_A-8-mrpc | 5 | null | transformers | 17,429 | ---
language:
- en
license: apache-2.0
tags:
- SEAD
datasets:
- glue
- mrpc
---
## Paper
## [SEAD: SIMPLE ENSEMBLE AND KNOWLEDGE DISTILLATION FRAMEWORK FOR NATURAL LANGUAGE UNDERSTANDING](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63)
Authors: *Moyan Mei*, *Rohit Sroch*
## Abstract
With the widespread use of pre-trained language models (PLM), there has been increased research on how to make them applicable, especially in limited-resource or low latency high throughput scenarios. One of the dominant approaches is knowledge distillation (KD), where a smaller model is trained by receiving guidance from a large PLM. While there are many successful designs for learning knowledge from teachers, it remains unclear how students can learn better. Inspired by real university teaching processes, in this work we further explore knowledge distillation and propose a very simple yet effective framework, SEAD, to further improve task-specific generalization by utilizing multiple teachers. Our experiments show that SEAD leads to better performance compared to other popular KD methods [[1](https://arxiv.org/abs/1910.01108)] [[2](https://arxiv.org/abs/1909.10351)] [[3](https://arxiv.org/abs/2002.10957)] and achieves comparable or superior performance to its teacher model such as BERT [[4](https://arxiv.org/abs/1810.04805)] on total 13 tasks for the GLUE [[5](https://arxiv.org/abs/1804.07461)] and SuperGLUE [[6](https://arxiv.org/abs/1905.00537)] benchmarks.
*Moyan Mei and Rohit Sroch. 2022. [SEAD: Simple ensemble and knowledge distillation framework for natural language understanding](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63).
Lattice, THE MACHINE LEARNING JOURNAL by Association of Data Scientists, 3(1).*
## SEAD-L-6_H-256_A-8-mrpc
This is a student model distilled from [**BERT base**](https://huggingface.co/bert-base-uncased) as teacher by using SEAD framework on **mrpc** task. For weights initialization, we used [microsoft/xtremedistil-l6-h256-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h256-uncased)
## All SEAD Checkpoints
Other Community Checkpoints: [here](https://huggingface.co/models?search=SEAD)
## Intended uses & limitations
More information needed
### Training hyperparameters
Please take a look at the `training_args.bin` file
```python
import torch

hyperparameters = torch.load('training_args.bin')
```
### Evaluation results
| eval_accuracy | eval_f1 | eval_runtime | eval_samples_per_second | eval_steps_per_second | eval_loss | eval_samples |
|:-------------:|:-------:|:------------:|:-----------------------:|:---------------------:|:---------:|:------------:|
| 0.8897 | 0.9206 | 1.4486 | 281.643 | 8.974 | 0.4399 | 408 |
### Framework versions
- Transformers >=4.8.0
- Pytorch >=1.6.0
- TensorFlow >=2.5.0
- Flax >=0.3.5
- Datasets >=1.10.2
- Tokenizers >=0.11.6
If you use these models, please cite the following paper:
```
@article{article,
author={Mei, Moyan and Sroch, Rohit},
title={SEAD: Simple Ensemble and Knowledge Distillation Framework for Natural Language Understanding},
volume={3},
number={1},
journal={Lattice, The Machine Learning Journal by Association of Data Scientists},
day={26},
year={2022},
month={Feb},
url = {www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63}
}
```
|
course5i/SEAD-L-6_H-256_A-8-rte | d10496b489f97e9e24b89ed94e403243de4e42c8 | 2022-06-12T21:02:01.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"text-classification",
"en",
"dataset:glue",
"dataset:rte",
"arxiv:1910.01108",
"arxiv:1909.10351",
"arxiv:2002.10957",
"arxiv:1810.04805",
"arxiv:1804.07461",
"arxiv:1905.00537",
"transformers",
"SEAD",
"license:apache-2.0"
] | text-classification | false | course5i | null | course5i/SEAD-L-6_H-256_A-8-rte | 5 | null | transformers | 17,430 | ---
language:
- en
license: apache-2.0
tags:
- SEAD
datasets:
- glue
- rte
---
## Paper
## [SEAD: SIMPLE ENSEMBLE AND KNOWLEDGE DISTILLATION FRAMEWORK FOR NATURAL LANGUAGE UNDERSTANDING](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63)
Authors: *Moyan Mei*, *Rohit Sroch*
## Abstract
With the widespread use of pre-trained language models (PLM), there has been increased research on how to make them applicable, especially in limited-resource or low latency high throughput scenarios. One of the dominant approaches is knowledge distillation (KD), where a smaller model is trained by receiving guidance from a large PLM. While there are many successful designs for learning knowledge from teachers, it remains unclear how students can learn better. Inspired by real university teaching processes, in this work we further explore knowledge distillation and propose a very simple yet effective framework, SEAD, to further improve task-specific generalization by utilizing multiple teachers. Our experiments show that SEAD leads to better performance compared to other popular KD methods [[1](https://arxiv.org/abs/1910.01108)] [[2](https://arxiv.org/abs/1909.10351)] [[3](https://arxiv.org/abs/2002.10957)] and achieves comparable or superior performance to its teacher model such as BERT [[4](https://arxiv.org/abs/1810.04805)] on total 13 tasks for the GLUE [[5](https://arxiv.org/abs/1804.07461)] and SuperGLUE [[6](https://arxiv.org/abs/1905.00537)] benchmarks.
*Moyan Mei and Rohit Sroch. 2022. [SEAD: Simple ensemble and knowledge distillation framework for natural language understanding](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63).
Lattice, THE MACHINE LEARNING JOURNAL by Association of Data Scientists, 3(1).*
## SEAD-L-6_H-256_A-8-rte
This is a student model distilled from [**BERT base**](https://huggingface.co/bert-base-uncased) as teacher by using SEAD framework on **rte** task. For weights initialization, we used [microsoft/xtremedistil-l6-h256-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h256-uncased)
## All SEAD Checkpoints
Other Community Checkpoints: [here](https://huggingface.co/models?search=SEAD)
## Intended uses & limitations
More information needed
### Training hyperparameters
Please take a look at the `training_args.bin` file
```python
import torch

hyperparameters = torch.load('training_args.bin')
```
### Evaluation results
| eval_accuracy | eval_runtime | eval_samples_per_second | eval_steps_per_second | eval_loss | eval_samples |
|:-------------:|:------------:|:-----------------------:|:---------------------:|:---------:|:------------:|
| 0.7906 | 1.5528 | 178.391 | 5.796 | 0.6934 | 277 |
### Framework versions
- Transformers >=4.8.0
- Pytorch >=1.6.0
- TensorFlow >=2.5.0
- Flax >=0.3.5
- Datasets >=1.10.2
- Tokenizers >=0.11.6
If you use these models, please cite the following paper:
```
@article{article,
author={Mei, Moyan and Sroch, Rohit},
title={SEAD: Simple Ensemble and Knowledge Distillation Framework for Natural Language Understanding},
volume={3},
number={1},
journal={Lattice, The Machine Learning Journal by Association of Data Scientists},
day={26},
year={2022},
month={Feb},
url = {www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63}
}
```
|
course5i/SEAD-L-6_H-256_A-8-stsb | 5604ed950e04c3bb58a45beec849e082ad10b205 | 2022-06-12T21:12:01.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"text-classification",
"en",
"dataset:glue",
"dataset:stsb",
"arxiv:1910.01108",
"arxiv:1909.10351",
"arxiv:2002.10957",
"arxiv:1810.04805",
"arxiv:1804.07461",
"arxiv:1905.00537",
"transformers",
"SEAD",
"license:apache-2.0"
] | text-classification | false | course5i | null | course5i/SEAD-L-6_H-256_A-8-stsb | 5 | null | transformers | 17,431 | ---
language:
- en
license: apache-2.0
tags:
- SEAD
datasets:
- glue
- stsb
---
## Paper
## [SEAD: SIMPLE ENSEMBLE AND KNOWLEDGE DISTILLATION FRAMEWORK FOR NATURAL LANGUAGE UNDERSTANDING](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63)
Authors: *Moyan Mei*, *Rohit Sroch*
## Abstract
With the widespread use of pre-trained language models (PLM), there has been increased research on how to make them applicable, especially in limited-resource or low latency high throughput scenarios. One of the dominant approaches is knowledge distillation (KD), where a smaller model is trained by receiving guidance from a large PLM. While there are many successful designs for learning knowledge from teachers, it remains unclear how students can learn better. Inspired by real university teaching processes, in this work we further explore knowledge distillation and propose a very simple yet effective framework, SEAD, to further improve task-specific generalization by utilizing multiple teachers. Our experiments show that SEAD leads to better performance compared to other popular KD methods [[1](https://arxiv.org/abs/1910.01108)] [[2](https://arxiv.org/abs/1909.10351)] [[3](https://arxiv.org/abs/2002.10957)] and achieves comparable or superior performance to its teacher model such as BERT [[4](https://arxiv.org/abs/1810.04805)] on total 13 tasks for the GLUE [[5](https://arxiv.org/abs/1804.07461)] and SuperGLUE [[6](https://arxiv.org/abs/1905.00537)] benchmarks.
*Moyan Mei and Rohit Sroch. 2022. [SEAD: Simple ensemble and knowledge distillation framework for natural language understanding](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63).
Lattice, THE MACHINE LEARNING JOURNAL by Association of Data Scientists, 3(1).*
## SEAD-L-6_H-256_A-8-stsb
This is a student model distilled from [**BERT base**](https://huggingface.co/bert-base-uncased) as teacher by using SEAD framework on **stsb** task. For weights initialization, we used [microsoft/xtremedistil-l6-h256-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h256-uncased)
## All SEAD Checkpoints
Other Community Checkpoints: [here](https://huggingface.co/models?search=SEAD)
## Intended uses & limitations
More information needed
### Training hyperparameters
Please take a look at the `training_args.bin` file
```python
import torch

hyperparameters = torch.load('training_args.bin')
```
### Evaluation results
| eval_pearson | eval_spearmanr | eval_runtime | eval_samples_per_second | eval_steps_per_second | eval_loss | eval_samples |
|:------------:|:--------------:|:------------:|:-----------------------:|:---------------------:|:---------:|:------------:|
| 0.8962 | 0.8978 | 2.1978 | 682.498 | 21.385 | 0.4679 | 1500 |
### Framework versions
- Transformers >=4.8.0
- Pytorch >=1.6.0
- TensorFlow >=2.5.0
- Flax >=0.3.5
- Datasets >=1.10.2
- Tokenizers >=0.11.6
If you use these models, please cite the following paper:
```
@article{article,
author={Mei, Moyan and Sroch, Rohit},
title={SEAD: Simple Ensemble and Knowledge Distillation Framework for Natural Language Understanding},
volume={3},
number={1},
journal={Lattice, The Machine Learning Journal by Association of Data Scientists},
day={26},
year={2022},
month={Feb},
url = {www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63}
}
```
|
course5i/SEAD-L-6_H-256_A-8-qnli | 820d4a62433c69670f9d8b7420684bffc38890ee | 2022-06-12T21:26:48.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"text-classification",
"en",
"dataset:glue",
"dataset:qnli",
"arxiv:1910.01108",
"arxiv:1909.10351",
"arxiv:2002.10957",
"arxiv:1810.04805",
"arxiv:1804.07461",
"arxiv:1905.00537",
"transformers",
"SEAD",
"license:apache-2.0"
] | text-classification | false | course5i | null | course5i/SEAD-L-6_H-256_A-8-qnli | 5 | null | transformers | 17,432 | ---
language:
- en
license: apache-2.0
tags:
- SEAD
datasets:
- glue
- qnli
---
## Paper
## [SEAD: SIMPLE ENSEMBLE AND KNOWLEDGE DISTILLATION FRAMEWORK FOR NATURAL LANGUAGE UNDERSTANDING](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63)
Authors: *Moyan Mei*, *Rohit Sroch*
## Abstract
With the widespread use of pre-trained language models (PLM), there has been increased research on how to make them applicable, especially in limited-resource or low latency high throughput scenarios. One of the dominant approaches is knowledge distillation (KD), where a smaller model is trained by receiving guidance from a large PLM. While there are many successful designs for learning knowledge from teachers, it remains unclear how students can learn better. Inspired by real university teaching processes, in this work we further explore knowledge distillation and propose a very simple yet effective framework, SEAD, to further improve task-specific generalization by utilizing multiple teachers. Our experiments show that SEAD leads to better performance compared to other popular KD methods [[1](https://arxiv.org/abs/1910.01108)] [[2](https://arxiv.org/abs/1909.10351)] [[3](https://arxiv.org/abs/2002.10957)] and achieves comparable or superior performance to its teacher model such as BERT [[4](https://arxiv.org/abs/1810.04805)] on total 13 tasks for the GLUE [[5](https://arxiv.org/abs/1804.07461)] and SuperGLUE [[6](https://arxiv.org/abs/1905.00537)] benchmarks.
*Moyan Mei and Rohit Sroch. 2022. [SEAD: Simple ensemble and knowledge distillation framework for natural language understanding](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63).
Lattice, THE MACHINE LEARNING JOURNAL by Association of Data Scientists, 3(1).*
## SEAD-L-6_H-256_A-8-qnli
This is a student model distilled from [**BERT base**](https://huggingface.co/bert-base-uncased) as teacher by using SEAD framework on **qnli** task. For weights initialization, we used [microsoft/xtremedistil-l6-h256-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h256-uncased)
## All SEAD Checkpoints
Other Community Checkpoints: [here](https://huggingface.co/models?search=SEAD)
## Intended uses & limitations
More information needed
### Training hyperparameters
Please take a look at the `training_args.bin` file
```python
import torch

hyperparameters = torch.load('training_args.bin')
```
### Evaluation results
| eval_accuracy | eval_runtime | eval_samples_per_second | eval_steps_per_second | eval_loss | eval_samples |
|:-------------:|:------------:|:-----------------------:|:---------------------:|:---------:|:------------:|
| 0.8979 | 4.3663 | 1251.171 | 39.164 | 0.2789 | 5463 |
### Framework versions
- Transformers >=4.8.0
- Pytorch >=1.6.0
- TensorFlow >=2.5.0
- Flax >=0.3.5
- Datasets >=1.10.2
- Tokenizers >=0.11.6
If you use these models, please cite the following paper:
```
@article{article,
author={Mei, Moyan and Sroch, Rohit},
title={SEAD: Simple Ensemble and Knowledge Distillation Framework for Natural Language Understanding},
volume={3},
number={1},
journal={Lattice, The Machine Learning Journal by Association of Data Scientists},
day={26},
year={2022},
month={Feb},
url = {www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63}
}
```
|
course5i/SEAD-L-6_H-256_A-8-qqp | b243ccc1372eb22e3465c64d81a4cd311ca33a94 | 2022-06-12T22:02:45.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"text-classification",
"en",
"dataset:glue",
"dataset:qqp",
"arxiv:1910.01108",
"arxiv:1909.10351",
"arxiv:2002.10957",
"arxiv:1810.04805",
"arxiv:1804.07461",
"arxiv:1905.00537",
"transformers",
"SEAD",
"license:apache-2.0"
] | text-classification | false | course5i | null | course5i/SEAD-L-6_H-256_A-8-qqp | 5 | null | transformers | 17,433 | ---
language:
- en
license: apache-2.0
tags:
- SEAD
datasets:
- glue
- qqp
---
## Paper
## [SEAD: SIMPLE ENSEMBLE AND KNOWLEDGE DISTILLATION FRAMEWORK FOR NATURAL LANGUAGE UNDERSTANDING](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63)
Authors: *Moyan Mei*, *Rohit Sroch*
## Abstract
With the widespread use of pre-trained language models (PLM), there has been increased research on how to make them applicable, especially in limited-resource or low latency high throughput scenarios. One of the dominant approaches is knowledge distillation (KD), where a smaller model is trained by receiving guidance from a large PLM. While there are many successful designs for learning knowledge from teachers, it remains unclear how students can learn better. Inspired by real university teaching processes, in this work we further explore knowledge distillation and propose a very simple yet effective framework, SEAD, to further improve task-specific generalization by utilizing multiple teachers. Our experiments show that SEAD leads to better performance compared to other popular KD methods [[1](https://arxiv.org/abs/1910.01108)] [[2](https://arxiv.org/abs/1909.10351)] [[3](https://arxiv.org/abs/2002.10957)] and achieves comparable or superior performance to its teacher model such as BERT [[4](https://arxiv.org/abs/1810.04805)] on total 13 tasks for the GLUE [[5](https://arxiv.org/abs/1804.07461)] and SuperGLUE [[6](https://arxiv.org/abs/1905.00537)] benchmarks.
*Moyan Mei and Rohit Sroch. 2022. [SEAD: Simple ensemble and knowledge distillation framework for natural language understanding](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63).
Lattice, THE MACHINE LEARNING JOURNAL by Association of Data Scientists, 3(1).*
## SEAD-L-6_H-256_A-8-qqp
This is a student model distilled from [**BERT base**](https://huggingface.co/bert-base-uncased) as teacher by using SEAD framework on **qqp** task. For weights initialization, we used [microsoft/xtremedistil-l6-h256-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h256-uncased)
## All SEAD Checkpoints
Other Community Checkpoints: [here](https://huggingface.co/models?search=SEAD)
## Intended uses & limitations
More information needed
### Training hyperparameters
Please take a look at the `training_args.bin` file
```python
import torch

hyperparameters = torch.load('training_args.bin')
```
### Evaluation results
| eval_accuracy | eval_f1 | eval_runtime | eval_samples_per_second | eval_steps_per_second | eval_loss | eval_samples |
|:-------------:|:-------:|:------------:|:-----------------------:|:---------------------:|:---------:|:------------:|
| 0.9065 | 0.8746 | 21.3929 | 1889.88 | 59.085 | 0.3154 | 40430 |
### Framework versions
- Transformers >=4.8.0
- Pytorch >=1.6.0
- TensorFlow >=2.5.0
- Flax >=0.3.5
- Datasets >=1.10.2
- Tokenizers >=0.11.6
If you use these models, please cite the following paper:
```
@article{article,
author={Mei, Moyan and Sroch, Rohit},
title={SEAD: Simple Ensemble and Knowledge Distillation Framework for Natural Language Understanding},
volume={3},
number={1},
journal={Lattice, The Machine Learning Journal by Association of Data Scientists},
day={26},
year={2022},
month={Feb},
url = {www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63}
}
```
|
course5i/SEAD-L-6_H-384_A-12-wnli | e209229650138c93a8498e858c8b79abb6f7d519 | 2022-06-12T23:09:21.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"text-classification",
"en",
"dataset:glue",
"dataset:wnli",
"arxiv:1910.01108",
"arxiv:1909.10351",
"arxiv:2002.10957",
"arxiv:1810.04805",
"arxiv:1804.07461",
"arxiv:1905.00537",
"transformers",
"SEAD",
"license:apache-2.0"
] | text-classification | false | course5i | null | course5i/SEAD-L-6_H-384_A-12-wnli | 5 | null | transformers | 17,434 | ---
language:
- en
license: apache-2.0
tags:
- SEAD
datasets:
- glue
- wnli
---
## Paper
## [SEAD: SIMPLE ENSEMBLE AND KNOWLEDGE DISTILLATION FRAMEWORK FOR NATURAL LANGUAGE UNDERSTANDING](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63)
Authors: *Moyan Mei*, *Rohit Sroch*
## Abstract
With the widespread use of pre-trained language models (PLM), there has been increased research on how to make them applicable, especially in limited-resource or low latency high throughput scenarios. One of the dominant approaches is knowledge distillation (KD), where a smaller model is trained by receiving guidance from a large PLM. While there are many successful designs for learning knowledge from teachers, it remains unclear how students can learn better. Inspired by real university teaching processes, in this work we further explore knowledge distillation and propose a very simple yet effective framework, SEAD, to further improve task-specific generalization by utilizing multiple teachers. Our experiments show that SEAD leads to better performance compared to other popular KD methods [[1](https://arxiv.org/abs/1910.01108)] [[2](https://arxiv.org/abs/1909.10351)] [[3](https://arxiv.org/abs/2002.10957)] and achieves comparable or superior performance to its teacher model such as BERT [[4](https://arxiv.org/abs/1810.04805)] on total 13 tasks for the GLUE [[5](https://arxiv.org/abs/1804.07461)] and SuperGLUE [[6](https://arxiv.org/abs/1905.00537)] benchmarks.
*Moyan Mei and Rohit Sroch. 2022. [SEAD: Simple ensemble and knowledge distillation framework for natural language understanding](https://www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63).
Lattice, THE MACHINE LEARNING JOURNAL by Association of Data Scientists, 3(1).*
## SEAD-L-6_H-384_A-12-wnli
This is a student model distilled from [**BERT base**](https://huggingface.co/bert-base-uncased) as teacher by using SEAD framework on **wnli** task. For weights initialization, we used [microsoft/xtremedistil-l6-h384-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h384-uncased)
## All SEAD Checkpoints
Other Community Checkpoints: [here](https://huggingface.co/models?search=SEAD)
## Intended uses & limitations
More information needed
### Training hyperparameters
Please take a look at the `training_args.bin` file
```python
import torch

hyperparameters = torch.load('training_args.bin')
```
### Evaluation results
| eval_accuracy | eval_runtime | eval_samples_per_second | eval_steps_per_second | eval_loss | eval_samples |
|:-------------:|:------------:|:-----------------------:|:---------------------:|:---------:|:------------:|
| 0.5775 | 1.2959 | 54.787 | 2.315 | 0.6717 | 71 |
### Framework versions
- Transformers >=4.8.0
- Pytorch >=1.6.0
- TensorFlow >=2.5.0
- Flax >=0.3.5
- Datasets >=1.10.2
- Tokenizers >=0.11.6
If you use these models, please cite the following paper:
```
@article{article,
author={Mei, Moyan and Sroch, Rohit},
title={SEAD: Simple Ensemble and Knowledge Distillation Framework for Natural Language Understanding},
volume={3},
number={1},
journal={Lattice, The Machine Learning Journal by Association of Data Scientists},
day={26},
year={2022},
month={Feb},
url = {www.adasci.org/journals/lattice-35309407/?volumes=true&open=621a3b18edc4364e8a96cb63}
}
```
|
anvayS/reddit-aita-classifier | 6cb9a1b9803163bb2a9108fe89bc7d1cc1da609d | 2022-06-13T09:08:26.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | anvayS | null | anvayS/reddit-aita-classifier | 5 | null | transformers | 17,435 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: reddit-aita-classifier
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# reddit-aita-classifier
This model is a fine-tuned version of [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1667
- Accuracy: 0.9497
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.5866 | 1.0 | 1250 | 0.5692 | 0.7247 |
| 0.5638 | 2.0 | 2500 | 0.4841 | 0.7813 |
| 0.4652 | 3.0 | 3750 | 0.2712 | 0.9077 |
| 0.3088 | 4.0 | 5000 | 0.1667 | 0.9497 |
### Framework versions
- Transformers 4.19.4
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
|
huggingtweets/egbertchannel | 257e29d002d9b4fc0c9160579e2975ef014b761e | 2022-06-13T15:49:21.000Z | [
"pytorch",
"gpt2",
"text-generation",
"en",
"transformers",
"huggingtweets"
] | text-generation | false | huggingtweets | null | huggingtweets/egbertchannel | 5 | null | transformers | 17,436 | ---
language: en
thumbnail: http://www.huggingtweets.com/egbertchannel/1655135356461/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1244575861912883201/2J-Ehfg3_400x400.jpg')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">Egbert</div>
<div style="text-align: center; font-size: 14px;">@egbertchannel</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from Egbert.
| Data | Egbert |
| --- | --- |
| Tweets downloaded | 3243 |
| Retweets | 272 |
| Short tweets | 496 |
| Tweets kept | 2475 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/he6lzjtk/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @egbertchannel's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/29xg9gi3) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/29xg9gi3/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation',
model='huggingtweets/egbertchannel')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
eslamxm/AraT5-base-finetune-ar-wikilingua | 3ed6e7474e7b91deaa07d636a7f9df923b47beaf | 2022-06-14T02:30:20.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:wiki_lingua",
"transformers",
"summarization",
"ar",
"Abstractive Summarization",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | summarization | false | eslamxm | null | eslamxm/AraT5-base-finetune-ar-wikilingua | 5 | null | transformers | 17,437 | ---
tags:
- summarization
- ar
- Abstractive Summarization
- generated_from_trainer
datasets:
- wiki_lingua
model-index:
- name: AraT5-base-finetune-ar-wikilingua
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# AraT5-base-finetune-ar-wikilingua
This model is a fine-tuned version of [UBC-NLP/AraT5-base](https://huggingface.co/UBC-NLP/AraT5-base) on the wiki_lingua dataset.
It achieves the following results on the evaluation set:
- Loss: 4.6110
- Rouge-1: 19.97
- Rouge-2: 6.9
- Rouge-l: 18.25
- Gen Len: 18.45
- Bertscore: 69.44
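As a hedged illustration (not taken from the card above), the fine-tuned checkpoint could be used for Arabic abstractive summarization roughly as follows; the input text and generation settings are assumptions.
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Minimal sketch; the Arabic input and the generation settings are illustrative assumptions.
model_id = "eslamxm/AraT5-base-finetune-ar-wikilingua"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

article = "نص عربي طويل يحتاج إلى تلخيص..."  # placeholder article text
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=512)
summary_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```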
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0005
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 250
- num_epochs: 10
- label_smoothing_factor: 0.1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge-1 | Rouge-2 | Rouge-l | Gen Len | Bertscore |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:-------:|:---------:|
| 11.5412 | 1.0 | 312 | 6.8825 | 5.2 | 0.69 | 5.04 | 19.0 | 63.2 |
| 6.5212 | 2.0 | 624 | 5.8992 | 8.89 | 1.4 | 8.36 | 17.92 | 63.9 |
| 5.8302 | 3.0 | 936 | 5.3712 | 9.99 | 2.21 | 9.54 | 15.87 | 65.08 |
| 5.406 | 4.0 | 1248 | 5.0632 | 13.94 | 3.5 | 13.0 | 15.95 | 66.83 |
| 5.1109 | 5.0 | 1560 | 4.8718 | 15.28 | 4.34 | 14.27 | 18.26 | 66.83 |
| 4.9004 | 6.0 | 1872 | 4.7631 | 16.65 | 4.92 | 15.46 | 17.73 | 67.75 |
| 4.754 | 7.0 | 2184 | 4.6920 | 18.31 | 5.79 | 16.9 | 18.17 | 68.55 |
| 4.6369 | 8.0 | 2496 | 4.6459 | 18.6 | 6.12 | 17.16 | 18.16 | 68.66 |
| 4.5595 | 9.0 | 2808 | 4.6153 | 18.94 | 6.1 | 17.39 | 17.82 | 68.99 |
| 4.4967 | 10.0 | 3120 | 4.6110 | 19.15 | 6.25 | 17.55 | 17.91 | 69.09 |
### Framework versions
- Transformers 4.19.4
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
|
Chemsseddine/bert2gpt2SUMM | 4c2d6088f4ca5a36a6575dc747b1b20af95137e9 | 2022-06-29T11:06:12.000Z | [
"pytorch",
"encoder-decoder",
"text2text-generation",
"Fr",
"dataset:Chemsseddine/autotrain-data-bertSummGpt2",
"transformers",
"co2_eq_emissions",
"autotrain_compatible"
] | text2text-generation | false | Chemsseddine | null | Chemsseddine/bert2gpt2SUMM | 5 | null | transformers | 17,438 | ---
language: Fr
widget:
- text: "Your text here"
datasets:
- Chemsseddine/autotrain-data-bertSummGpt2
co2_eq_emissions: 0.10685501288084795
---
<img src="https://huggingface.co/Chemsseddine/bert2gpt2_med_ml_orange_summ-finetuned_med_sum_new-finetuned_med_sum_new/resolve/main/logobert2gpt2.png" alt="Map of positive probabilities per country." width="200"/>
## This model is used for French summarization
- Problem type: Summarization
- Model ID: 980832493
- CO2 Emissions (in grams): 0.10685501288084795
## Validation Metrics
- Loss: 4.03749418258667
- Rouge1: 28.8384
- Rouge2: 10.7511
- RougeL: 27.0842
- RougeLsum: 27.5118
- Gen Len: 22.0625
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/Chemsseddine/autotrain-bertSummGpt2-980832493
``` |
sampras343/wav2vec2-base-ft-keyword-spotting | 61c834dbe30612037c5b2967a2804f26c39d124d | 2022-06-14T10:02:24.000Z | [
"pytorch",
"tensorboard",
"dataset:superb",
"audio-classification",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | audio-classification | false | sampras343 | null | sampras343/wav2vec2-base-ft-keyword-spotting | 5 | null | null | 17,439 | ---
license: apache-2.0
tags:
- audio-classification
- generated_from_trainer
datasets:
- superb
metrics:
- accuracy
model-index:
- name: wav2vec2-base-ft-keyword-spotting
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-ft-keyword-spotting
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the superb dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0824
- Accuracy: 0.9826
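For illustration only (not part of the generated card), the fine-tuned checkpoint could be queried through the audio-classification pipeline; the audio file path below is a placeholder.
```python
from transformers import pipeline

# Minimal sketch: keyword spotting framed as audio classification.
# "speech_sample.wav" is a placeholder path, not part of the original card.
classifier = pipeline(
    "audio-classification",
    model="sampras343/wav2vec2-base-ft-keyword-spotting",
)
for prediction in classifier("speech_sample.wav", top_k=3):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```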
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 0
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.8972 | 1.0 | 399 | 0.7023 | 0.8174 |
| 0.3274 | 2.0 | 798 | 0.1634 | 0.9773 |
| 0.1993 | 3.0 | 1197 | 0.1048 | 0.9788 |
| 0.1777 | 4.0 | 1596 | 0.0824 | 0.9826 |
| 0.1527 | 5.0 | 1995 | 0.0812 | 0.9810 |
### Framework versions
- Transformers 4.12.0.dev0
- Pytorch 1.9.1+cu111
- Datasets 1.14.0
- Tokenizers 0.10.3
|
Auruncus/gpt-j-6b-8bit-ft-v1 | f78d7c22068be39f4642a19467acaac6b450672a | 2022-06-15T18:07:48.000Z | [
"pytorch",
"gptj",
"text-generation",
"transformers"
] | text-generation | false | Auruncus | null | Auruncus/gpt-j-6b-8bit-ft-v1 | 5 | null | transformers | 17,440 | Entry not found |
totoro4007/cryptobert-base-all-finetuned | 28e871cf4f4cca2f0a2e87e35ddd445d67cfaea1 | 2022-06-15T03:15:09.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | totoro4007 | null | totoro4007/cryptobert-base-all-finetuned | 5 | null | transformers | 17,441 | Entry not found |
olpa/pegasus-samsum | d5c0e8cff13e1ee845473e86e4ae20dbd7ae2d33 | 2022-06-15T04:40:48.000Z | [
"pytorch",
"tensorboard",
"pegasus",
"text2text-generation",
"dataset:samsum",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | olpa | null | olpa/pegasus-samsum | 5 | null | transformers | 17,442 | ---
tags:
- generated_from_trainer
datasets:
- samsum
model-index:
- name: pegasus-samsum
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pegasus-samsum
This model is a fine-tuned version of [google/pegasus-cnn_dailymail](https://huggingface.co/google/pegasus-cnn_dailymail) on the samsum dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4863
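As a rough sketch (the dialogue below is an invented example, not from the SAMSum data), the fine-tuned checkpoint can summarize a chat transcript like this:
```python
from transformers import pipeline

# Minimal sketch; the dialogue is an illustrative assumption, not from SAMSum.
summarizer = pipeline("summarization", model="olpa/pegasus-samsum")

dialogue = (
    "Anna: Are we still on for lunch tomorrow?\n"
    "Ben: Yes, 12:30 at the usual place.\n"
    "Anna: Perfect, see you then!"
)
print(summarizer(dialogue, max_length=32, min_length=5)[0]["summary_text"])
```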
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.7014 | 0.54 | 500 | 1.4863 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
|
aymanashour/summ | dc01f1f6722baa97d2a13605cc39349f8ef3dd41 | 2022-06-16T00:18:21.000Z | [
"pytorch",
"megatron-bert",
"text-classification",
"transformers",
"license:other"
] | text-classification | false | aymanashour | null | aymanashour/summ | 5 | null | transformers | 17,443 | ---
license: other
---
|
fourthbrain-demo/finetuning-sentiment-model-3000-samples | edca6fe47d153165e381cbe020389d15530a79a0 | 2022-06-15T22:51:47.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | fourthbrain-demo | null | fourthbrain-demo/finetuning-sentiment-model-3000-samples | 5 | null | transformers | 17,444 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: finetuning-sentiment-model-3000-samples
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetuning-sentiment-model-3000-samples
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3023
- Accuracy: 0.8767
- F1: 0.8771
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
### Framework versions
- Transformers 4.19.4
- Pytorch 1.11.0+cu113
- Tokenizers 0.12.1
|
aymanashour/summ2 | 140e761aab80742141362f07cd6e6df9cdee8e3f | 2022-06-15T23:30:34.000Z | [
"pytorch",
"megatron-bert",
"text-classification",
"transformers",
"license:apache-2.0"
] | text-classification | false | aymanashour | null | aymanashour/summ2 | 5 | null | transformers | 17,445 | ---
license: apache-2.0
---
|
microsoft/swinv2-base-patch4-window12to16-192to256-22kto1k-ft | 03d5fcecf39d909480044f3b4f46c6a7ae09fb11 | 2022-07-09T05:31:21.000Z | [
"pytorch",
"swinv2",
"transformers"
] | null | false | microsoft | null | microsoft/swinv2-base-patch4-window12to16-192to256-22kto1k-ft | 5 | null | transformers | 17,446 | Entry not found |
rbawden/CCASS-semi-auto-titrages-base | 8f896fb26833145f0b1e9461b1392951b3ba4241 | 2022-07-05T21:42:57.000Z | [
"pytorch",
"fsmt",
"fr",
"transformers",
"license:cc-by-4.0"
] | null | false | rbawden | null | rbawden/CCASS-semi-auto-titrages-base | 5 | null | transformers | 17,447 | ---
language: fr
license: cc-by-4.0
---
# Cour de Cassation semi-automatic *titrage* prediction model
Model for the semi-automatic prediction of *titrages* (keyword sequence) from *sommaires* (synthesis of legal cases).
The models are similar to the automatic models described in [this paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf) and to the model available [here](https://huggingface.co/rbawden/CCASS-pred-titrages-base). If you use this semi-automatic model, please cite our research paper (see [below](#cite)).
## Model description
The model is a transformer-base model trained on parallel data (sommaires-titrages) provided by the Cour de Cassation. The model was initially trained using the Fairseq toolkit, converted to HuggingFace and then fine-tuned on the original training data to smooth out minor differences that arose during the conversion process. Tokenisation is performed using a SentencePiece model, the BPE strategy and a vocab size of 8000.
### Intended uses & limitations
This model is to be used to help in the production of *titrages* for those *sommaires* that do not have them or to complement existing (manually) created *titrages*.
### How to use
Contrary to the [automatic *titrage* prediction model](https://huggingface.co/rbawden/CCASS-pred-titrages-base) (designed to predict the entire sequence), this model is designed to help in the manual production of *titrages*, by proposing the next *titre* (keyword) in the sequence given a *sommaire* and the beginning of the *titrage*.
Model input is the *matière* (matter), followed by the *titres* already decided on, followed by the text of the *sommaire*, all separated by the token `<t>`. Each example should be on a single line. E.g. `bail <t> résiliation <t> causes <t> La recommendation du tribunal selon l'article...` (a fictitious example for illustrative purposes, where the matter is *bail* and the beginning of the *titrage* is *résiliation <t> causes*). The maximum input length of the model is 1024 tokens (after tokenisation).
```
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokeniser = AutoTokenizer.from_pretrained("rbawden/CCASS-semi-auto-titrages-base")
model = AutoModelForSeq2SeqLM.from_pretrained("rbawden/CCASS-semi-auto-titrages-base")
matiere_and_titrage_prefix = "matter <t> titre"
sommaire = "full text from the sommaire on a single line"
inputs = tokeniser([matiere_and_titrage_prefix + " <t> " + sommaire], return_tensors='pt')
outputs = model.generate(inputs['input_ids'])
tokeniser.batch_decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
```
### Limitations and bias
The models' predictions should not be taken as ground-truth *titrages* and the final decision should be the expert's. The model is not constrained to predict *titres* that have previously been seen, so this should be taken into account in the deployment of this model as a *titrage* tool in order to avoid the multiplication of different *titres*.
## Training data
Training data is provided by the Cour de Cassation (the original source being Jurinet data, but with pseudo-anonymisation applied). For training, we use a total of 159,836 parallel examples (each example is a sommaire-titrage pair). Our development data consists of 1,833 held-out examples.
## Training procedure
### Preprocessing
We use SentencePiece, the BPE strategy and a joint vocabulary of 8000 tokens. This model was converted into the HuggingFace format and integrates a number of normalisation processes (e.g. removal of doubled apostrophes and quotes, normalisation of different accent formats, lowercasing).
### Training
The model was initially trained using Fairseq until convergence on the development set (according to our customised weighted accuracy measure - please see [the paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf) for more details). The model was then converted to HuggingFace and training continued to smooth out inconsistencies introduced during the conversion procedure (incompatibilities in the way the SentencePiece and NMT vocabularies are defined, linked to HuggingFace vocabularies being necessarily the same as the tokeniser vocabulary, a constraint that is not imposed in Fairseq).
### Evaluation results
Full results for the initial (automatic) Fairseq models can be found in [the paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf).
Results on this semi-automatic model coming soon!
## BibTex entry and citation info
<a name="cite"></a>
If you use this work, please cite the following article:
Thibault Charmet, Inès Cherichi, Matthieu Allain, Urszula Czerwinska, Amaury Fouret, Benoît Sagot and Rachel Bawden, 2022. [**Complex Labelling and Similarity Prediction in Legal Texts: Automatic Analysis of France’s Court of Cassation Rulings**](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.509.pdf). In Proceedings of the 13th Language Resources and Evaluation Conference, Marseille, France.
```
@inproceedings{charmet-et-al-2022-complex,
  title = {Complex Labelling and Similarity Prediction in Legal Texts: Automatic Analysis of France’s Court of Cassation Rulings},
  author = {Charmet, Thibault and Cherichi, Inès and Allain, Matthieu and Czerwinska, Urszula and Fouret, Amaury and Sagot, Benoît and Bawden, Rachel},
  booktitle = {Proceedings of the 13th Language Resources and Evaluation Conference},
  year = {2022},
  address = {Marseille, France},
  pages = {4754--4766},
  url = {http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.509.pdf}
}
```
|
chlab/efficientnet_75_planet_detection | 08a757a3100fb0b805916c245697a802b2fbe9aa | 2022-06-17T14:23:52.000Z | [
"pytorch",
"Python 3.7+",
"dataset:imagenet",
"dataset:imagenet-21k",
"transformers",
"vision",
"image-classification",
"license:apache-2.0"
] | image-classification | false | chlab | null | chlab/efficientnet_75_planet_detection | 5 | null | transformers | 17,448 | ---
language:
- Python 3.7+
license: apache-2.0
tags:
- vision
- image-classification
datasets:
- imagenet
- imagenet-21k
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
example_title: Palace
---
# Efficientnetv2 (75 channels) |
WENGSYX/MedCPT | 44d08ce838bbb76962839490c2ef2ddd65772ca7 | 2022-07-15T05:14:21.000Z | [
"pytorch",
"bart",
"feature-extraction",
"transformers"
] | feature-extraction | false | WENGSYX | null | WENGSYX/MedCPT | 5 | null | transformers | 17,449 | # MedCPT
###### Pre-trained medical model for the LingYi system
###### Please load the model using the `modeling_cpt.py` file from [**CPT**](https://huggingface.co/fnlp/cpt-large)
## Usage
```python
>>> from modeling_cpt import CPTForConditionalGeneration
>>> from transformers import BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("WENGSYX/MedCPT")
>>> model = CPTForConditionalGeneration.from_pretrained("WENGSYX/MedCPT")
>>> input_ids = tokenizer.encode("医生你好,腹泻难受应该怎么办?", return_tensors='pt')  # "Hello doctor, what should I do about painful diarrhoea?"
>>> pred_ids = model.generate(input_ids, num_beams=4, max_length=20)
>>> print(tokenizer.convert_ids_to_tokens(pred_ids[0]))
``` |
jkhan447/sarcasm-detection-RoBerta-base-newdata | 57a79d0ab45ae1b8ab5d707fec4360b566543730 | 2022-06-17T14:34:34.000Z | [
"pytorch",
"tensorboard",
"roberta",
"text-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
] | text-classification | false | jkhan447 | null | jkhan447/sarcasm-detection-RoBerta-base-newdata | 5 | null | transformers | 17,450 | ---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: sarcasm-detection-RoBerta-base-newdata
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# sarcasm-detection-RoBerta-base-newdata
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4844
- Accuracy: 0.7824
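Below is a minimal inference sketch (not part of the original model card). It assumes the standard `transformers` text-classification pipeline; the returned label names depend on the checkpoint's `id2label` mapping and may surface as generic `LABEL_0`/`LABEL_1`.
```python
from transformers import pipeline

# Hypothetical usage sketch for sarcasm detection; labels may be LABEL_0/LABEL_1 unless id2label was set.
classifier = pipeline("text-classification", model="jkhan447/sarcasm-detection-RoBerta-base-newdata")
print(classifier("Oh great, another Monday. Exactly what I needed."))
```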
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.20.0
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
huggingtweets/techreview | b1ca853b2e6e40184d0771f489c6384aacf45b2c | 2022-06-17T09:38:07.000Z | [
"pytorch",
"gpt2",
"text-generation",
"en",
"transformers",
"huggingtweets"
] | text-generation | false | huggingtweets | null | huggingtweets/techreview | 5 | null | transformers | 17,451 | ---
language: en
thumbnail: http://www.huggingtweets.com/techreview/1655458683048/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1072880528712495106/ahuQUlOb_400x400.jpg')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">MIT Technology Review</div>
<div style="text-align: center; font-size: 14px;">@techreview</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from MIT Technology Review.
| Data | MIT Technology Review |
| --- | --- |
| Tweets downloaded | 3250 |
| Retweets | 293 |
| Short tweets | 1 |
| Tweets kept | 2956 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1zbwqwsb/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @techreview's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2bzg3pev) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2bzg3pev/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation',
model='huggingtweets/techreview')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
efederici/convnext-base-224-22k-1k-orig-cats-vs-dogs | b16e21af48c1f4433f997d2da9e4675babee28da | 2022-06-17T14:11:20.000Z | [
"pytorch",
"tensorboard",
"convnext",
"image-classification",
"dataset:cats_vs_dogs",
"arxiv:2201.03545",
"transformers",
"vision",
"license:apache-2.0",
"model-index"
] | image-classification | false | efederici | null | efederici/convnext-base-224-22k-1k-orig-cats-vs-dogs | 5 | null | transformers | 17,452 | ---
license: apache-2.0
tags:
- image-classification
- vision
datasets:
- cats_vs_dogs
metrics:
- accuracy
model-index:
- name: convnext-base-224-22k-1k-orig-cats-vs-dogs
results:
- task:
name: Image Classification
type: image-classification
dataset:
name: cats_vs_dogs
type: cats_vs_dogs
args: default
metrics:
- name: Accuracy
type: accuracy
value: 0.9973333333333333
---
# convnext-base-224-22k-1k-orig-cats-vs-dogs
This model is a fine-tuned version of [facebook/convnext-base-224-22k-1k](https://huggingface.co/facebook/convnext-base-224-22k-1k) on the cats_vs_dogs dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0103
- Accuracy: 0.9973
<p align="center">
<img src="https://files.ocula.com/anzax/09/09f77133-7740-4130-a567-84fb56736362_650_544.jpg" width="600"> </br>
Jockum Nordström, Cat Dog Cat, 2016
</p>
## Model description
The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5.0
### BibTeX entry and citation info
```bibtex
@article{DBLP:journals/corr/abs-2201-03545,
author = {Zhuang Liu and
Hanzi Mao and
Chao{-}Yuan Wu and
Christoph Feichtenhofer and
Trevor Darrell and
Saining Xie},
title = {A ConvNet for the 2020s},
journal = {CoRR},
volume = {abs/2201.03545},
year = {2022},
url = {https://arxiv.org/abs/2201.03545},
eprinttype = {arXiv},
eprint = {2201.03545},
timestamp = {Thu, 20 Jan 2022 14:21:35 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-2201-03545.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
``` |
Suryabhan/tiny-bert-sst2-distilled | cf81b7d7c5303541379cd67eedd4069bb4f85f44 | 2022-06-27T03:41:00.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Suryabhan | null | Suryabhan/tiny-bert-sst2-distilled | 5 | null | transformers | 17,453 | Entry not found |
skpawar1305/wav2vec2-base-finetuned-ks | 52d090170a472c3f4027948a8a8a335401a01800 | 2022-06-18T11:12:09.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"audio-classification",
"dataset:superb",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | audio-classification | false | skpawar1305 | null | skpawar1305/wav2vec2-base-finetuned-ks | 5 | null | transformers | 17,454 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- superb
metrics:
- accuracy
model-index:
- name: wav2vec2-base-finetuned-ks
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-finetuned-ks
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the superb dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0903
- Accuracy: 0.9834
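A minimal keyword-spotting sketch (not part of the original card): the audio path is a placeholder, and the model is assumed to expect 16 kHz mono speech as in the superb `ks` task.
```python
from transformers import pipeline

# Hypothetical usage sketch; point it at a short 16 kHz mono WAV clip of a spoken keyword.
classifier = pipeline("audio-classification", model="skpawar1305/wav2vec2-base-finetuned-ks")
print(classifier("path/to/keyword.wav", top_k=3))
```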
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.7264 | 1.0 | 399 | 0.6319 | 0.9351 |
| 0.2877 | 2.0 | 798 | 0.1846 | 0.9748 |
| 0.175 | 3.0 | 1197 | 0.1195 | 0.9796 |
| 0.1672 | 4.0 | 1596 | 0.0903 | 0.9834 |
| 0.1235 | 5.0 | 1995 | 0.0854 | 0.9825 |
### Framework versions
- Transformers 4.20.0
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
AlyxTheKitten/DialoGPT-medium-Jimmis-2 | a13b76b2835e62293b2000eeaa806f0f1624a200 | 2022-06-18T05:46:35.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | AlyxTheKitten | null | AlyxTheKitten/DialoGPT-medium-Jimmis-2 | 5 | null | transformers | 17,455 | ---
tags:
- conversational
---
# AgedBlaine DialoGPT Model 2 |
huggingtweets/andrewdoyle_com-conceptualjames-titaniamcgrath | ba5b93781389f75d92763d7ce172c41547129215 | 2022-06-18T09:11:46.000Z | [
"pytorch",
"gpt2",
"text-generation",
"en",
"transformers",
"huggingtweets"
] | text-generation | false | huggingtweets | null | huggingtweets/andrewdoyle_com-conceptualjames-titaniamcgrath | 5 | null | transformers | 17,456 | ---
language: en
thumbnail: http://www.huggingtweets.com/andrewdoyle_com-conceptualjames-titaniamcgrath/1655543501221/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/991329326846087169/vxothdvT_400x400.jpg')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1283787273310556161/HpOtnzmp_400x400.jpg')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1459175734602350593/cW3fs5lR_400x400.jpg')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">Titania McGrath & Andrew Doyle & James Lindsay, weaponizing your mom</div>
<div style="text-align: center; font-size: 14px;">@andrewdoyle_com-conceptualjames-titaniamcgrath</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from Titania McGrath & Andrew Doyle & James Lindsay, weaponizing your mom.
| Data | Titania McGrath | Andrew Doyle | James Lindsay, weaponizing your mom |
| --- | --- | --- | --- |
| Tweets downloaded | 2873 | 3232 | 3226 |
| Retweets | 220 | 781 | 1222 |
| Short tweets | 104 | 306 | 587 |
| Tweets kept | 2549 | 2145 | 1417 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1dewpz75/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @andrewdoyle_com-conceptualjames-titaniamcgrath's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3ed5g462) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3ed5g462/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation',
model='huggingtweets/andrewdoyle_com-conceptualjames-titaniamcgrath')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
eslamxm/mbart-finetune-ar-xlsum | 64bb51fb268d4800a4dc556f425126dc8645049e | 2022-06-19T03:58:19.000Z | [
"pytorch",
"tensorboard",
"mbart",
"text2text-generation",
"dataset:xlsum",
"transformers",
"summarization",
"ar",
"seq2seq",
"Abstractive Summarization",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | summarization | false | eslamxm | null | eslamxm/mbart-finetune-ar-xlsum | 5 | null | transformers | 17,457 | ---
tags:
- summarization
- ar
- seq2seq
- mbart
- Abstractive Summarization
- generated_from_trainer
datasets:
- xlsum
model-index:
- name: mbart-finetune-ar-xlsum
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mbart-finetune-ar-xlsum
This model is a fine-tuned version of [facebook/mbart-large-50](https://huggingface.co/facebook/mbart-large-50) on the xlsum dataset.
It achieves the following results on the evaluation set:
- Loss: 4.4328
- Rouge-1: 15.56
- Rouge-2: 4.64
- Rouge-l: 13.59
- Gen Len: 38.86
- Bertscore: 71.53
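A minimal Arabic summarization sketch (not part of the original card): mBART-50 checkpoints usually require the tokenizer's `src_lang` to be set, here assumed to be `ar_AR`; the article string is a placeholder.
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "eslamxm/mbart-finetune-ar-xlsum"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

article = "..."  # an Arabic news article goes here
tokenizer.src_lang = "ar_AR"  # assumption: mBART-50 language code for Arabic
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=512)
summary_ids = model.generate(**inputs, num_beams=4, max_length=64)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```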
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0005
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 250
- num_epochs: 5
- label_smoothing_factor: 0.1
### Training results
### Framework versions
- Transformers 4.20.0
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
twhitehurst3/autotrain-blaze_text_classification-1004733283 | 9f28c51b04f8244daea3178f2a9b4dd82deab94d | 2022-06-19T19:06:24.000Z | [
"pytorch",
"bert",
"text-classification",
"en",
"dataset:twhitehurst3/autotrain-data-blaze_text_classification",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | twhitehurst3 | null | twhitehurst3/autotrain-blaze_text_classification-1004733283 | 5 | null | transformers | 17,458 | |
strnlz/distilbert-base-uncased-finetuned-sst2 | 9b1646b7505f7f35ea6ab39607df8454dff050cd | 2022-06-21T05:01:59.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers"
] | text-classification | false | strnlz | null | strnlz/distilbert-base-uncased-finetuned-sst2 | 5 | null | transformers | 17,459 | Entry not found |
Alireza1044/MobileBERT_Theseus-qqp | dc08e46f2233ba5cac8935e44a0e15598bec73be | 2022-06-20T18:38:18.000Z | [
"pytorch",
"mobilebert",
"text-classification",
"transformers"
] | text-classification | false | Alireza1044 | null | Alireza1044/MobileBERT_Theseus-qqp | 5 | null | transformers | 17,460 | Entry not found |
davidcechak/DNADeberta_fine_ | a72b31adeee7025d8c48426b2126b9e384d5ef7a | 2022-06-21T15:39:44.000Z | [
"pytorch",
"deberta",
"text-classification",
"transformers"
] | text-classification | false | davidcechak | null | davidcechak/DNADeberta_fine_ | 5 | null | transformers | 17,461 | Entry not found |
PontifexMaximus/Turkish2 | 7b1aa7f746e0ba436366bd79dea5546dbb9214f6 | 2022-07-09T05:48:46.000Z | [
"pytorch",
"marian",
"text2text-generation",
"dataset:opus_infopankki",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | PontifexMaximus | null | PontifexMaximus/Turkish2 | 5 | null | transformers | 17,462 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- opus_infopankki
metrics:
- bleu
model-index:
- name: opus-mt-tr-en-finetuned-tr-to-en
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: opus_infopankki
type: opus_infopankki
args: en-tr
metrics:
- name: Bleu
type: bleu
value: 56.617
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# opus-mt-tr-en-finetuned-tr-to-en
This model is a fine-tuned version of [Helsinki-NLP/opus-mt-tr-en](https://huggingface.co/Helsinki-NLP/opus-mt-tr-en) on the opus_infopankki dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6321
- Bleu: 56.617
- Gen Len: 13.5983
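A minimal Turkish-to-English translation sketch (not part of the original card); the example sentence is illustrative only.
```python
from transformers import pipeline

# Hypothetical usage sketch for the fine-tuned Marian tr->en model.
translator = pipeline("translation", model="PontifexMaximus/Turkish2")
print(translator("Türkiye'de oturma izni nasıl alınır?"))  # "How is a residence permit obtained in Turkey?"
```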
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-06
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| No log | 1.0 | 241 | 1.2487 | 41.0053 | 13.0461 |
| No log | 2.0 | 482 | 1.1630 | 43.1077 | 13.0386 |
| 1.4091 | 3.0 | 723 | 1.0992 | 44.6583 | 13.0445 |
| 1.4091 | 4.0 | 964 | 1.0463 | 45.5931 | 13.0289 |
| 1.2325 | 5.0 | 1205 | 1.0012 | 46.7039 | 12.9998 |
| 1.2325 | 6.0 | 1446 | 0.9610 | 47.6783 | 13.0274 |
| 1.1284 | 7.0 | 1687 | 0.9262 | 48.622 | 12.9866 |
| 1.1284 | 8.0 | 1928 | 0.8939 | 48.4984 | 13.5762 |
| 1.0486 | 9.0 | 2169 | 0.8642 | 49.1496 | 13.5918 |
| 1.0486 | 10.0 | 2410 | 0.8391 | 49.8875 | 13.5905 |
| 0.9866 | 11.0 | 2651 | 0.8150 | 50.6447 | 13.5803 |
| 0.9866 | 12.0 | 2892 | 0.7941 | 51.2059 | 13.5731 |
| 0.9362 | 13.0 | 3133 | 0.7741 | 51.7071 | 13.5754 |
| 0.9362 | 14.0 | 3374 | 0.7564 | 52.4185 | 13.5781 |
| 0.8928 | 15.0 | 3615 | 0.7398 | 53.0814 | 13.5744 |
| 0.8928 | 16.0 | 3856 | 0.7247 | 53.5711 | 13.5783 |
| 0.8598 | 17.0 | 4097 | 0.7111 | 54.0559 | 13.568 |
| 0.8598 | 18.0 | 4338 | 0.6988 | 54.5188 | 13.5598 |
| 0.8274 | 19.0 | 4579 | 0.6876 | 54.78 | 13.5765 |
| 0.8274 | 20.0 | 4820 | 0.6780 | 55.1494 | 13.5762 |
| 0.8086 | 21.0 | 5061 | 0.6688 | 55.5813 | 13.5788 |
| 0.8086 | 22.0 | 5302 | 0.6610 | 55.6403 | 13.5796 |
| 0.7878 | 23.0 | 5543 | 0.6539 | 55.7731 | 13.5989 |
| 0.7878 | 24.0 | 5784 | 0.6483 | 55.9956 | 13.593 |
| 0.7718 | 25.0 | 6025 | 0.6432 | 56.2303 | 13.5904 |
| 0.7718 | 26.0 | 6266 | 0.6390 | 56.4825 | 13.5975 |
| 0.7633 | 27.0 | 6507 | 0.6360 | 56.5334 | 13.5958 |
| 0.7633 | 28.0 | 6748 | 0.6338 | 56.5357 | 13.5965 |
| 0.7633 | 29.0 | 6989 | 0.6325 | 56.5862 | 13.5974 |
| 0.7584 | 30.0 | 7230 | 0.6321 | 56.617 | 13.5983 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0
- Datasets 2.3.2
- Tokenizers 0.12.1
|
dunlp/GWW-finetuned-cola | 10889c75b6f9bc7d4b786d591e5048632ecda6f9 | 2022-06-21T13:03:24.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"model-index"
] | text-classification | false | dunlp | null | dunlp/GWW-finetuned-cola | 5 | null | transformers | 17,463 | ---
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: GWW-finetuned-cola
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.16962352015480656
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# GWW-finetuned-cola
This model is a fine-tuned version of [dunlp/GWW](https://huggingface.co/dunlp/GWW) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6609
- Matthews Correlation: 0.1696
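A minimal acceptability-judgement sketch (not part of the original card): CoLA labels are conventionally unacceptable/acceptable, but may appear as `LABEL_0`/`LABEL_1` depending on the config.
```python
from transformers import pipeline

# Hypothetical usage sketch for grammatical-acceptability classification (CoLA).
classifier = pipeline("text-classification", model="dunlp/GWW-finetuned-cola")
print(classifier("The book was written by John."))
print(classifier("Book the written John by was."))
```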
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.6181 | 1.0 | 535 | 0.6585 | 0.0 |
| 0.5938 | 2.0 | 1070 | 0.6276 | 0.0511 |
| 0.5241 | 3.0 | 1605 | 0.6609 | 0.1696 |
| 0.4433 | 4.0 | 2140 | 0.8239 | 0.1432 |
| 0.3492 | 5.0 | 2675 | 0.9236 | 0.1351 |
### Framework versions
- Transformers 4.20.0
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Jeevesh8/std_0pnt2_bert_ft_cola-71 | 4c9b54416f7b8653916b5863beb2553ea3dca86e | 2022-06-21T13:28:34.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_0pnt2_bert_ft_cola-71 | 5 | null | transformers | 17,464 | Entry not found |
Jeevesh8/std_0pnt2_bert_ft_cola-68 | fffa2a342887dd61ca879d0b379832e9e2d9d3ea | 2022-06-21T13:30:39.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_0pnt2_bert_ft_cola-68 | 5 | null | transformers | 17,465 | Entry not found |
Jeevesh8/std_0pnt2_bert_ft_cola-77 | bddceea5d70278a61606b4beaa1944a02b70acf1 | 2022-06-21T13:28:04.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/std_0pnt2_bert_ft_cola-77 | 5 | null | transformers | 17,466 | Entry not found |
deepesh0x/autotrain-mlsec-1013333726 | 029432a37633676073c8ebd22d6ffd4793fc581f | 2022-06-21T20:49:59.000Z | [
"pytorch",
"julien",
"text-classification",
"en",
"dataset:deepesh0x/autotrain-data-mlsec",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | deepesh0x | null | deepesh0x/autotrain-mlsec-1013333726 | 5 | null | transformers | 17,467 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- deepesh0x/autotrain-data-mlsec
co2_eq_emissions: 33.183779535405364
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 1013333726
- CO2 Emissions (in grams): 33.183779535405364
## Validation Metrics
- Loss: 0.1998898833990097
- Accuracy: 0.9226923076923077
- Precision: 0.9269808389435525
- Recall: 0.9177134068187645
- AUC: 0.9785380985232148
- F1: 0.9223238438747907
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/deepesh0x/autotrain-mlsec-1013333726
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("deepesh0x/autotrain-mlsec-1013333726", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("deepesh0x/autotrain-mlsec-1013333726", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
deepesh0x/autotrain-GlueFineTunedModel-1013533798 | 11592e83736afb3be90e7638d7d5d157ee598f57 | 2022-06-21T18:16:42.000Z | [
"pytorch",
"bert",
"text-classification",
"unk",
"dataset:deepesh0x/autotrain-data-GlueFineTunedModel",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | deepesh0x | null | deepesh0x/autotrain-GlueFineTunedModel-1013533798 | 5 | null | transformers | 17,468 | ---
tags: autotrain
language: unk
widget:
- text: "I love AutoTrain 🤗"
datasets:
- deepesh0x/autotrain-data-GlueFineTunedModel
co2_eq_emissions: 56.65990763623749
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 1013533798
- CO2 Emissions (in grams): 56.65990763623749
## Validation Metrics
- Loss: 0.693366527557373
- Accuracy: 0.4998717948717949
- Precision: 0.0
- Recall: 0.0
- AUC: 0.5
- F1: 0.0
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/deepesh0x/autotrain-GlueFineTunedModel-1013533798
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("deepesh0x/autotrain-GlueFineTunedModel-1013533798", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("deepesh0x/autotrain-GlueFineTunedModel-1013533798", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
Alireza1044/MobileBERT_Theseus-qnli | 9d0f49d1c92f05cdcb29303726b38c4a5bd7bb3d | 2022-06-21T21:11:07.000Z | [
"pytorch",
"mobilebert",
"text-classification",
"transformers"
] | text-classification | false | Alireza1044 | null | Alireza1044/MobileBERT_Theseus-qnli | 5 | null | transformers | 17,469 | Entry not found |
Elron/deberta-v3-large-offensive | 77c18928a0efae7927043beab4a8c8036741ae6f | 2022-06-22T09:47:41.000Z | [
"pytorch",
"deberta-v2",
"text-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
] | text-classification | false | Elron | null | Elron/deberta-v3-large-offensive | 5 | null | transformers | 17,470 | ---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: deberta-v3-large
results: []
---
# deberta-v3-large-offensive
This model is a fine-tuned version of [microsoft/deberta-v3-large](https://huggingface.co/microsoft/deberta-v3-large) on the [tweet_eval](https://huggingface.co/datasets/tweet_eval) offensive-language task.
## Model description
Test set results:
| Model | Emotion | Hate | Irony | Offensive | Sentiment |
| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- |
| deberta-v3-large | **86.3** | **61.3** | **87.1** | **86.4** | **73.9** |
| BERTweet | 79.3 | - | 82.1 | 79.5 | 73.4 |
| RoB-RT | 79.5 | 52.3 | 61.7 | 80.5 | 69.3 |
[source:papers_with_code](https://paperswithcode.com/sota/sentiment-analysis-on-tweeteval)
## Intended uses & limitations
Classifying attributes of interest in Twitter-like data.
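A minimal usage sketch (not part of the original card); the label names depend on the checkpoint's `id2label` mapping and may be generic.
```python
from transformers import pipeline

# Hypothetical usage sketch for offensive-language detection on Twitter-like text.
classifier = pipeline("text-classification", model="Elron/deberta-v3-large-offensive")
print(classifier("@user you really outdid yourself this time..."))
```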
## Training and evaluation data
[tweet_eval](https://huggingface.co/datasets/tweet_eval) dataset.
## Training procedure
Fine-tuned and evaluated with `run_glue.py`.
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-06
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 50
- num_epochs: 10.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6417 | 0.27 | 100 | 0.6283 | 0.6533 |
| 0.5105 | 0.54 | 200 | 0.4588 | 0.7915 |
| 0.4554 | 0.81 | 300 | 0.4500 | 0.7968 |
| 0.4212 | 1.08 | 400 | 0.4773 | 0.7938 |
| 0.4054 | 1.34 | 500 | 0.4311 | 0.7983 |
| 0.3922 | 1.61 | 600 | 0.4588 | 0.7998 |
| 0.3776 | 1.88 | 700 | 0.4367 | 0.8066 |
| 0.3535 | 2.15 | 800 | 0.4675 | 0.8074 |
| 0.33 | 2.42 | 900 | 0.4874 | 0.8021 |
| 0.3113 | 2.69 | 1000 | 0.4949 | 0.8044 |
| 0.3203 | 2.96 | 1100 | 0.4550 | 0.8059 |
| 0.248 | 3.23 | 1200 | 0.4858 | 0.8036 |
| 0.2478 | 3.49 | 1300 | 0.5299 | 0.8029 |
| 0.2371 | 3.76 | 1400 | 0.5013 | 0.7991 |
| 0.2388 | 4.03 | 1500 | 0.5520 | 0.8021 |
| 0.1744 | 4.3 | 1600 | 0.6687 | 0.7915 |
| 0.1788 | 4.57 | 1700 | 0.7560 | 0.7689 |
| 0.1652 | 4.84 | 1800 | 0.6985 | 0.7832 |
| 0.1596 | 5.11 | 1900 | 0.7191 | 0.7915 |
| 0.1214 | 5.38 | 2000 | 0.9097 | 0.7893 |
| 0.1432 | 5.64 | 2100 | 0.9184 | 0.7787 |
| 0.1145 | 5.91 | 2200 | 0.9620 | 0.7878 |
| 0.1069 | 6.18 | 2300 | 0.9489 | 0.7893 |
| 0.1012 | 6.45 | 2400 | 1.0107 | 0.7817 |
| 0.0942 | 6.72 | 2500 | 1.0021 | 0.7885 |
| 0.087 | 6.99 | 2600 | 1.1090 | 0.7915 |
| 0.0598 | 7.26 | 2700 | 1.1735 | 0.7795 |
| 0.0742 | 7.53 | 2800 | 1.1433 | 0.7817 |
| 0.073 | 7.79 | 2900 | 1.1343 | 0.7953 |
| 0.0553 | 8.06 | 3000 | 1.2258 | 0.7840 |
| 0.0474 | 8.33 | 3100 | 1.2461 | 0.7817 |
| 0.0515 | 8.6 | 3200 | 1.2996 | 0.7825 |
| 0.0551 | 8.87 | 3300 | 1.2819 | 0.7855 |
| 0.0541 | 9.14 | 3400 | 1.2808 | 0.7855 |
| 0.0465 | 9.41 | 3500 | 1.3398 | 0.7817 |
| 0.0407 | 9.68 | 3600 | 1.3231 | 0.7825 |
| 0.0343 | 9.94 | 3700 | 1.3330 | 0.7825 |
### Framework versions
- Transformers 4.20.0.dev0
- Pytorch 1.9.0
- Datasets 2.2.2
- Tokenizers 0.11.6
|
Elron/deberta-v3-large-sentiment | 6621c4c995ae121abdab2761edd71bae9abd7da1 | 2022-06-22T09:45:55.000Z | [
"pytorch",
"deberta-v2",
"text-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
] | text-classification | false | Elron | null | Elron/deberta-v3-large-sentiment | 5 | null | transformers | 17,471 | ---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: deberta-v3-large
results: []
---
# deberta-v3-large-sentiment
This model is a fine-tuned version of [microsoft/deberta-v3-large](https://huggingface.co/microsoft/deberta-v3-large) on the [tweet_eval](https://huggingface.co/datasets/tweet_eval) sentiment task.
## Model description
Test set results:
| Model | Emotion | Hate | Irony | Offensive | Sentiment |
| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- |
| deberta-v3-large | **86.3** | **61.3** | **87.1** | **86.4** | **73.9** |
| BERTweet | 79.3 | - | 82.1 | 79.5 | 73.4 |
| RoB-RT | 79.5 | 52.3 | 61.7 | 80.5 | 69.3 |
[source:papers_with_code](https://paperswithcode.com/sota/sentiment-analysis-on-tweeteval)
## Intended uses & limitations
Classifying attributes of interest in Twitter-like data.
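A minimal usage sketch (not part of the original card); the three tweet_eval sentiment classes are assumed to be negative/neutral/positive in label order, which should be checked against the checkpoint's `id2label` mapping.
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "Elron/deberta-v3-large-sentiment"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

inputs = tokenizer("What a fantastic launch event!", return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)  # assumed class order: [negative, neutral, positive]
```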
## Training and evaluation data
[tweet_eval](https://huggingface.co/datasets/tweet_eval) dataset.
## Training procedure
Fine-tuned and evaluated with `run_glue.py`.
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 50
- num_epochs: 10.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 1.0614 | 0.07 | 100 | 1.0196 | 0.4345 |
| 0.8601 | 0.14 | 200 | 0.7561 | 0.6460 |
| 0.734 | 0.21 | 300 | 0.6796 | 0.6955 |
| 0.6753 | 0.28 | 400 | 0.6521 | 0.7000 |
| 0.6408 | 0.35 | 500 | 0.6119 | 0.7440 |
| 0.5991 | 0.42 | 600 | 0.6034 | 0.7370 |
| 0.6069 | 0.49 | 700 | 0.5976 | 0.7375 |
| 0.6122 | 0.56 | 800 | 0.5871 | 0.7425 |
| 0.5908 | 0.63 | 900 | 0.5935 | 0.7445 |
| 0.5884 | 0.7 | 1000 | 0.5792 | 0.7520 |
| 0.5839 | 0.77 | 1100 | 0.5780 | 0.7555 |
| 0.5772 | 0.84 | 1200 | 0.5727 | 0.7570 |
| 0.5895 | 0.91 | 1300 | 0.5601 | 0.7550 |
| 0.5757 | 0.98 | 1400 | 0.5613 | 0.7525 |
| 0.5121 | 1.05 | 1500 | 0.5867 | 0.7600 |
| 0.5254 | 1.12 | 1600 | 0.5595 | 0.7630 |
| 0.5074 | 1.19 | 1700 | 0.5594 | 0.7585 |
| 0.4947 | 1.26 | 1800 | 0.5697 | 0.7575 |
| 0.5019 | 1.33 | 1900 | 0.5665 | 0.7580 |
| 0.5005 | 1.4 | 2000 | 0.5484 | 0.7655 |
| 0.5125 | 1.47 | 2100 | 0.5626 | 0.7605 |
| 0.5241 | 1.54 | 2200 | 0.5561 | 0.7560 |
| 0.5198 | 1.61 | 2300 | 0.5602 | 0.7600 |
| 0.5124 | 1.68 | 2400 | 0.5654 | 0.7490 |
| 0.5096 | 1.75 | 2500 | 0.5803 | 0.7515 |
| 0.4885 | 1.82 | 2600 | 0.5889 | 0.75 |
| 0.5111 | 1.89 | 2700 | 0.5508 | 0.7665 |
| 0.4868 | 1.96 | 2800 | 0.5621 | 0.7635 |
| 0.4599 | 2.04 | 2900 | 0.5995 | 0.7615 |
| 0.4147 | 2.11 | 3000 | 0.6202 | 0.7530 |
| 0.4233 | 2.18 | 3100 | 0.5875 | 0.7625 |
| 0.4324 | 2.25 | 3200 | 0.5794 | 0.7610 |
| 0.4141 | 2.32 | 3300 | 0.5902 | 0.7460 |
| 0.4306 | 2.39 | 3400 | 0.6053 | 0.7545 |
| 0.4266 | 2.46 | 3500 | 0.5979 | 0.7570 |
| 0.4227 | 2.53 | 3600 | 0.5920 | 0.7650 |
| 0.4226 | 2.6 | 3700 | 0.6166 | 0.7455 |
| 0.3978 | 2.67 | 3800 | 0.6126 | 0.7560 |
| 0.3954 | 2.74 | 3900 | 0.6152 | 0.7550 |
| 0.4209 | 2.81 | 4000 | 0.5980 | 0.75 |
| 0.3982 | 2.88 | 4100 | 0.6096 | 0.7490 |
| 0.4016 | 2.95 | 4200 | 0.6541 | 0.7425 |
| 0.3966 | 3.02 | 4300 | 0.6377 | 0.7545 |
| 0.3074 | 3.09 | 4400 | 0.6860 | 0.75 |
| 0.3551 | 3.16 | 4500 | 0.6160 | 0.7550 |
| 0.3323 | 3.23 | 4600 | 0.6714 | 0.7520 |
| 0.3171 | 3.3 | 4700 | 0.6538 | 0.7535 |
| 0.3403 | 3.37 | 4800 | 0.6774 | 0.7465 |
| 0.3396 | 3.44 | 4900 | 0.6726 | 0.7465 |
| 0.3259 | 3.51 | 5000 | 0.6465 | 0.7480 |
| 0.3392 | 3.58 | 5100 | 0.6860 | 0.7460 |
| 0.3251 | 3.65 | 5200 | 0.6697 | 0.7495 |
| 0.3253 | 3.72 | 5300 | 0.6770 | 0.7430 |
| 0.3455 | 3.79 | 5400 | 0.7177 | 0.7360 |
| 0.3323 | 3.86 | 5500 | 0.6943 | 0.7400 |
| 0.3335 | 3.93 | 5600 | 0.6507 | 0.7555 |
| 0.3368 | 4.0 | 5700 | 0.6580 | 0.7485 |
| 0.2479 | 4.07 | 5800 | 0.7667 | 0.7430 |
| 0.2613 | 4.14 | 5900 | 0.7513 | 0.7505 |
| 0.2557 | 4.21 | 6000 | 0.7927 | 0.7485 |
| 0.243 | 4.28 | 6100 | 0.7792 | 0.7450 |
| 0.2473 | 4.35 | 6200 | 0.8107 | 0.7355 |
| 0.2447 | 4.42 | 6300 | 0.7851 | 0.7370 |
| 0.2515 | 4.49 | 6400 | 0.7529 | 0.7465 |
| 0.274 | 4.56 | 6500 | 0.7390 | 0.7465 |
| 0.2674 | 4.63 | 6600 | 0.7658 | 0.7460 |
| 0.2416 | 4.7 | 6700 | 0.7915 | 0.7485 |
| 0.2432 | 4.77 | 6800 | 0.7989 | 0.7435 |
| 0.2595 | 4.84 | 6900 | 0.7850 | 0.7380 |
| 0.2736 | 4.91 | 7000 | 0.7577 | 0.7395 |
| 0.2783 | 4.98 | 7100 | 0.7650 | 0.7405 |
| 0.2304 | 5.05 | 7200 | 0.8542 | 0.7385 |
| 0.1937 | 5.12 | 7300 | 0.8390 | 0.7345 |
| 0.1878 | 5.19 | 7400 | 0.9150 | 0.7330 |
| 0.1921 | 5.26 | 7500 | 0.8792 | 0.7405 |
| 0.1916 | 5.33 | 7600 | 0.8892 | 0.7410 |
| 0.2011 | 5.4 | 7700 | 0.9012 | 0.7325 |
| 0.211 | 5.47 | 7800 | 0.8608 | 0.7420 |
| 0.2194 | 5.54 | 7900 | 0.8852 | 0.7320 |
| 0.205 | 5.61 | 8000 | 0.8803 | 0.7385 |
| 0.1981 | 5.68 | 8100 | 0.8681 | 0.7330 |
| 0.1908 | 5.75 | 8200 | 0.9020 | 0.7435 |
| 0.1942 | 5.82 | 8300 | 0.8780 | 0.7410 |
| 0.1958 | 5.89 | 8400 | 0.8937 | 0.7345 |
| 0.1883 | 5.96 | 8500 | 0.9121 | 0.7360 |
| 0.1819 | 6.04 | 8600 | 0.9409 | 0.7430 |
| 0.145 | 6.11 | 8700 | 1.1390 | 0.7265 |
| 0.1696 | 6.18 | 8800 | 0.9189 | 0.7430 |
| 0.1488 | 6.25 | 8900 | 0.9718 | 0.7400 |
| 0.1637 | 6.32 | 9000 | 0.9702 | 0.7450 |
| 0.1547 | 6.39 | 9100 | 1.0033 | 0.7410 |
| 0.1605 | 6.46 | 9200 | 0.9973 | 0.7355 |
| 0.1552 | 6.53 | 9300 | 1.0491 | 0.7290 |
| 0.1731 | 6.6 | 9400 | 1.0271 | 0.7335 |
| 0.1738 | 6.67 | 9500 | 0.9575 | 0.7430 |
| 0.1669 | 6.74 | 9600 | 0.9614 | 0.7350 |
| 0.1347 | 6.81 | 9700 | 1.0263 | 0.7365 |
| 0.1593 | 6.88 | 9800 | 1.0173 | 0.7360 |
| 0.1549 | 6.95 | 9900 | 1.0398 | 0.7350 |
| 0.1675 | 7.02 | 10000 | 0.9975 | 0.7380 |
| 0.1182 | 7.09 | 10100 | 1.1059 | 0.7350 |
| 0.1351 | 7.16 | 10200 | 1.0933 | 0.7400 |
| 0.1496 | 7.23 | 10300 | 1.0731 | 0.7355 |
| 0.1197 | 7.3 | 10400 | 1.1089 | 0.7360 |
| 0.1111 | 7.37 | 10500 | 1.1381 | 0.7405 |
| 0.1494 | 7.44 | 10600 | 1.0252 | 0.7425 |
| 0.1235 | 7.51 | 10700 | 1.0906 | 0.7360 |
| 0.133 | 7.58 | 10800 | 1.1796 | 0.7375 |
| 0.1248 | 7.65 | 10900 | 1.1332 | 0.7420 |
| 0.1268 | 7.72 | 11000 | 1.1304 | 0.7415 |
| 0.1368 | 7.79 | 11100 | 1.1345 | 0.7380 |
| 0.1228 | 7.86 | 11200 | 1.2018 | 0.7320 |
| 0.1281 | 7.93 | 11300 | 1.1884 | 0.7350 |
| 0.1449 | 8.0 | 11400 | 1.1571 | 0.7345 |
| 0.1025 | 8.07 | 11500 | 1.1538 | 0.7345 |
| 0.1199 | 8.14 | 11600 | 1.2113 | 0.7390 |
| 0.1016 | 8.21 | 11700 | 1.2882 | 0.7370 |
| 0.114 | 8.28 | 11800 | 1.2872 | 0.7390 |
| 0.1019 | 8.35 | 11900 | 1.2876 | 0.7380 |
| 0.1142 | 8.42 | 12000 | 1.2791 | 0.7385 |
| 0.1135 | 8.49 | 12100 | 1.2883 | 0.7380 |
| 0.1139 | 8.56 | 12200 | 1.2829 | 0.7360 |
| 0.1107 | 8.63 | 12300 | 1.2698 | 0.7365 |
| 0.1183 | 8.7 | 12400 | 1.2660 | 0.7345 |
| 0.1064 | 8.77 | 12500 | 1.2889 | 0.7365 |
| 0.0895 | 8.84 | 12600 | 1.3480 | 0.7330 |
| 0.1244 | 8.91 | 12700 | 1.2872 | 0.7325 |
| 0.1209 | 8.98 | 12800 | 1.2681 | 0.7375 |
| 0.1144 | 9.05 | 12900 | 1.2711 | 0.7370 |
| 0.1034 | 9.12 | 13000 | 1.2801 | 0.7360 |
| 0.113 | 9.19 | 13100 | 1.2801 | 0.7350 |
| 0.0994 | 9.26 | 13200 | 1.2920 | 0.7360 |
| 0.0966 | 9.33 | 13300 | 1.2761 | 0.7335 |
| 0.0939 | 9.4 | 13400 | 1.2909 | 0.7365 |
| 0.0975 | 9.47 | 13500 | 1.2953 | 0.7360 |
| 0.0842 | 9.54 | 13600 | 1.3179 | 0.7335 |
| 0.0871 | 9.61 | 13700 | 1.3149 | 0.7385 |
| 0.1162 | 9.68 | 13800 | 1.3124 | 0.7350 |
| 0.085 | 9.75 | 13900 | 1.3207 | 0.7355 |
| 0.0966 | 9.82 | 14000 | 1.3248 | 0.7335 |
| 0.1064 | 9.89 | 14100 | 1.3261 | 0.7335 |
| 0.1046 | 9.96 | 14200 | 1.3255 | 0.7360 |
### Framework versions
- Transformers 4.20.0.dev0
- Pytorch 1.9.0
- Datasets 2.2.2
- Tokenizers 0.11.6
|
lmqg/bart-base-squadshifts-vanilla-reddit | 0b1a394d09d92c0383cccef45ee3a12f4ebfe6d8 | 2022-06-22T10:48:42.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | lmqg | null | lmqg/bart-base-squadshifts-vanilla-reddit | 5 | null | transformers | 17,472 | Entry not found |
lmqg/bart-large-squadshifts-vanilla-reddit | 934ba96c1a60c6e25da207ae49c20d60e0f76901 | 2022-06-22T10:57:36.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | lmqg | null | lmqg/bart-large-squadshifts-vanilla-reddit | 5 | null | transformers | 17,473 | Entry not found |
Mizew/autotrain-avar-1016534299 | cb0624f159dbe25e869f4aa01ec36c263da07582 | 2022-06-22T12:12:07.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"en",
"es",
"dataset:Mizew/autotrain-data-avar",
"transformers",
"autotrain",
"translation",
"co2_eq_emissions",
"autotrain_compatible"
] | translation | false | Mizew | null | Mizew/autotrain-avar-1016534299 | 5 | null | transformers | 17,474 | ---
tags:
- autotrain
- translation
language:
- en
- es
datasets:
- Mizew/autotrain-data-avar
co2_eq_emissions: 0.07815966018818815
---
# Model Trained Using AutoTrain
- Problem type: Translation
- Model ID: 1016534299
- CO2 Emissions (in grams): 0.07815966018818815
## Validation Metrics
- Loss: 0.9978321194648743
- SacreBLEU: 13.8459
- Gen len: 6.0588 |
mmillet/xlm-roberta-base_single_finetuned_on_cedr_augmented | 71f2f122c1c37fe396b60fb2dca90a402c46275c | 2022-06-22T18:01:45.000Z | [
"pytorch",
"tensorboard",
"xlm-roberta",
"text-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
] | text-classification | false | mmillet | null | mmillet/xlm-roberta-base_single_finetuned_on_cedr_augmented | 5 | null | transformers | 17,475 | ---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
- precision
- recall
model-index:
- name: xlm-roberta-base_single_finetuned_on_cedr_augmented
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base_single_finetuned_on_cedr_augmented
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4650
- Accuracy: 0.8820
- F1: 0.8814
- Precision: 0.8871
- Recall: 0.8820
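A minimal usage sketch (not part of the original card): CEDR is a Russian emotion-classification dataset, so the input is Russian and the emotion label set depends on the checkpoint config.
```python
from transformers import pipeline

# Hypothetical usage sketch; CEDR covers Russian emotion classes (e.g. joy, sadness, anger, fear, surprise).
classifier = pipeline("text-classification", model="mmillet/xlm-roberta-base_single_finetuned_on_cedr_augmented")
print(classifier("Я так рад, что всё получилось!"))  # "I'm so glad everything worked out!"
```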
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
- lr_scheduler_type: linear
- num_epochs: 20
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|
| 0.8868 | 1.0 | 69 | 0.4939 | 0.8403 | 0.8376 | 0.8431 | 0.8403 |
| 0.4248 | 2.0 | 138 | 0.3969 | 0.8779 | 0.8768 | 0.8798 | 0.8779 |
| 0.3197 | 3.0 | 207 | 0.4019 | 0.8758 | 0.8757 | 0.8758 | 0.8758 |
| 0.2737 | 4.0 | 276 | 0.3915 | 0.8831 | 0.8827 | 0.8847 | 0.8831 |
| 0.2053 | 5.0 | 345 | 0.4445 | 0.8643 | 0.8650 | 0.8714 | 0.8643 |
| 0.1705 | 6.0 | 414 | 0.4650 | 0.8820 | 0.8814 | 0.8871 | 0.8820 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
cestwc/roberta-large | 38b748c75df155c9a11b5230ad13520e56db68e4 | 2022-06-23T12:10:28.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | false | cestwc | null | cestwc/roberta-large | 5 | null | transformers | 17,476 | Entry not found |
Sayan01/tiny-bert-wnli-distilled | bd412120e47796517b66c6f478441b7319a7db7e | 2022-06-30T15:31:49.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Sayan01 | null | Sayan01/tiny-bert-wnli-distilled | 5 | null | transformers | 17,477 | Entry not found |
Lucifer-nick/hsqcSmiles | 1c5ca771658f45aa2a8426c2f5fc598f4d69eb66 | 2022-06-24T03:49:31.000Z | [
"pytorch",
"transformers",
"license:apache-2.0"
] | null | false | Lucifer-nick | null | Lucifer-nick/hsqcSmiles | 5 | null | transformers | 17,478 | ---
license: apache-2.0
---
|
IsaMaks/distilbert-base-uncased-finetuned-ner | 4a3bad3bf09468a79a74641c34b0ccbbc2108e3a | 2022-07-06T14:48:51.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"token-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | token-classification | false | IsaMaks | null | IsaMaks/distilbert-base-uncased-finetuned-ner | 5 | null | transformers | 17,479 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-ner
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-ner
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8874
- Precision: 0.2534
- Recall: 0.3333
- F1: 0.2879
- Accuracy: 0.7603
- True predictions: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
- True labels: [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 0, 0, 1, 2, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 0, 1, 2, 2, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | True predictions | True labels |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
| No log | 1.0 | 2 | 0.9937 | 0.2839 | 0.3072 | 0.2951 | 0.6712 | [0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2] | [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 0, 0, 1, 2, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 0, 1, 2, 2, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
| No log | 2.0 | 4 | 0.9155 | 0.2523 | 0.3273 | 0.2850 | 0.7466 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] | [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 0, 0, 1, 2, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 0, 1, 2, 2, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
| No log | 3.0 | 6 | 0.8874 | 0.2534 | 0.3333 | 0.2879 | 0.7603 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] | [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 0, 0, 1, 2, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 0, 1, 2, 2, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
constructor/chinese-roberta-wwm-ext-large | 52172128bb11d1c79fb114ca9428c2a2c6457766 | 2022-06-26T08:09:51.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | constructor | null | constructor/chinese-roberta-wwm-ext-large | 5 | null | transformers | 17,480 | Entry not found |
ABDPOOR/pft-clf-finetuned | 769922ff8695df1628cc8fb1c4ad5efbbbafbc6d | 2022-06-26T12:49:43.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | ABDPOOR | null | ABDPOOR/pft-clf-finetuned | 5 | null | transformers | 17,481 | Entry not found |
Dorin/DialoGPT-small-Rick | 86797e8c6418cd101c7e6c07e0c5edd038082154 | 2022-06-26T17:47:02.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Dorin | null | Dorin/DialoGPT-small-Rick | 5 | null | transformers | 17,482 | ---
tags:
- conversational
---
# Rick and Morty DialoGPT Model |
vaibhavagg303/Bart-Multilingual | 455dcfcbad79b339186634acd3efd2b1c157a046 | 2022-06-27T02:44:25.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | vaibhavagg303 | null | vaibhavagg303/Bart-Multilingual | 5 | null | transformers | 17,483 | Entry not found |
kaisuke/finetuning-sentiment-model-3000-samples | 3ac5da4dc843484f9698dfd96735b410521c0253 | 2022-06-26T21:39:32.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"dataset:imdb",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | kaisuke | null | kaisuke/finetuning-sentiment-model-3000-samples | 5 | null | transformers | 17,484 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imdb
metrics:
- accuracy
- f1
model-index:
- name: finetuning-sentiment-model-3000-samples
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: imdb
type: imdb
args: plain_text
metrics:
- name: Accuracy
type: accuracy
value: 0.87
- name: F1
type: f1
value: 0.8695652173913044
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetuning-sentiment-model-3000-samples
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3120
- Accuracy: 0.87
- F1: 0.8696
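As a minimal, illustrative usage sketch (not part of the original card; the example sentence is invented, and the raw `LABEL_0`/`LABEL_1` output names are an assumption since the card does not document the label mapping), the model can be loaded with the standard `transformers` pipeline:
```python
from transformers import pipeline

# Binary sentiment classifier fine-tuned on IMDB reviews.
classifier = pipeline(
    "text-classification",
    model="kaisuke/finetuning-sentiment-model-3000-samples",
)
print(classifier("This movie was surprisingly good."))
```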
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Shanny/dbgbert-finetuned-squad | a37bf2f42c09efe0728d6b044f725fab251a0639 | 2022-06-28T15:28:28.000Z | [
"pytorch",
"tensorboard",
"bert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | question-answering | false | Shanny | null | Shanny/dbgbert-finetuned-squad | 5 | null | transformers | 17,485 | ---
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: dbgbert-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# dbgbert-finetuned-squad
This model was trained from scratch on the squad dataset.
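A minimal extractive question-answering sketch, assuming the model follows the usual SQuAD-style question/context input format (the question and context below are invented for illustration):
```python
from transformers import pipeline

qa = pipeline("question-answering", model="Shanny/dbgbert-finetuned-squad")
result = qa(
    question="What dataset was the model trained on?",
    context="The dbgbert model was trained on the SQuAD question answering dataset.",
)
print(result)  # dict with 'score', 'start', 'end', and 'answer'
```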
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
plncmm/gpt2-wl-base-es | 45daaff4f0e07506e29b7fb4aaac17e527a600a1 | 2022-06-27T13:59:01.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-generation | false | plncmm | null | plncmm/gpt2-wl-base-es | 5 | null | transformers | 17,486 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: gpt2-wl-base-es
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# gpt2-wl-base-es
This model is a fine-tuned version of [PlanTL-GOB-ES/gpt2-base-bne](https://huggingface.co/PlanTL-GOB-ES/gpt2-base-bne) on an unknown dataset.
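As an illustrative sketch only (the prompt is invented and the fine-tuning domain is not documented on the card), the model can be used for Spanish text generation:
```python
from transformers import pipeline

generator = pipeline("text-generation", model="plncmm/gpt2-wl-base-es")
# Sample a short continuation of an arbitrary Spanish prompt.
print(generator("Hola, hoy quiero hablar de", max_new_tokens=30, do_sample=True))
```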
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
### Framework versions
- Transformers 4.21.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.3.3.dev0
- Tokenizers 0.12.1
|
chisun/mt5-small-finetuned-amazon-en-es-accelerate3 | 91f536289ba4ea029334bcd8d3e82d824adc20fa | 2022-06-28T00:26:43.000Z | [
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | chisun | null | chisun/mt5-small-finetuned-amazon-en-es-accelerate3 | 5 | null | transformers | 17,487 | Entry not found |
jmwolf27/finetuning-sentiment-model-3000-samples | 064d67d6102ba48091c663b9ce0e56a343dbe9c6 | 2022-06-28T02:19:32.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"dataset:imdb",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | jmwolf27 | null | jmwolf27/finetuning-sentiment-model-3000-samples | 5 | null | transformers | 17,488 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imdb
metrics:
- accuracy
- f1
model-index:
- name: finetuning-sentiment-model-3000-samples
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: imdb
type: imdb
args: plain_text
metrics:
- name: Accuracy
type: accuracy
value: 0.8766666666666667
- name: F1
type: f1
value: 0.877887788778878
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetuning-sentiment-model-3000-samples
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3167
- Accuracy: 0.8767
- F1: 0.8779
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
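For illustration, these settings correspond roughly to the following `TrainingArguments`; the actual training script is not part of this card, so the `output_dir` and any omitted defaults are assumptions:
```python
from transformers import TrainingArguments

# Rough reconstruction of the hyperparameters listed above (illustrative only).
args = TrainingArguments(
    output_dir="finetuning-sentiment-model-3000-samples",  # assumed name
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    num_train_epochs=2,
    lr_scheduler_type="linear",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```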
### Training results
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Aalaa/opt-125m-wikitext2 | ed091de27bfb23a83297b1a29026f9fe071b1ecc | 2022-06-28T22:39:40.000Z | [
"pytorch",
"tensorboard",
"opt",
"text-generation",
"transformers",
"generated_from_trainer",
"license:other",
"model-index"
] | text-generation | false | Aalaa | null | Aalaa/opt-125m-wikitext2 | 5 | null | transformers | 17,489 | ---
license: other
tags:
- generated_from_trainer
model-index:
- name: opt-125m-wikitext2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# opt-125m-wikitext2
This model is a fine-tuned version of [facebook/opt-125m](https://huggingface.co/facebook/opt-125m) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 3.3409
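For causal language models this evaluation loss is typically the mean token-level cross-entropy, so under that assumption it corresponds to a perplexity of roughly 28:
```python
import math

eval_loss = 3.3409  # value reported above
perplexity = math.exp(eval_loss)  # valid if the loss is mean cross-entropy per token
print(f"approximate evaluation perplexity: {perplexity:.1f}")
```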
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.4123 | 1.0 | 2370 | 3.3621 |
| 3.2096 | 2.0 | 4740 | 3.3452 |
| 3.0822 | 3.0 | 7110 | 3.3409 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Smith123/tiny-bert-sst2-distilled_L4_H_512 | 5c35f1a0f81f8b9f630ae49a9b0d57a3d26634c2 | 2022-06-29T10:42:39.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Smith123 | null | Smith123/tiny-bert-sst2-distilled_L4_H_512 | 5 | null | transformers | 17,490 | Entry not found |
jdang/bert-finetuned-ner | 7c57ac8c08a13e9a2b666069bbf9f0ddd310e491 | 2022-06-29T22:07:37.000Z | [
"pytorch",
"tensorboard",
"bert",
"token-classification",
"dataset:conll2003",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | token-classification | false | jdang | null | jdang/bert-finetuned-ner | 5 | null | transformers | 17,491 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-finetuned-ner
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: conll2003
type: conll2003
args: conll2003
metrics:
- name: Precision
type: precision
value: 0.9357509521443947
- name: Recall
type: recall
value: 0.9510265903736116
- name: F1
type: f1
value: 0.9433269343126617
- name: Accuracy
type: accuracy
value: 0.9864160828869135
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-finetuned-ner
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0629
- Precision: 0.9358
- Recall: 0.9510
- F1: 0.9433
- Accuracy: 0.9864
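A minimal usage sketch (the example sentence is invented; entity labels follow the CoNLL-2003 scheme the model was fine-tuned on):
```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="jdang/bert-finetuned-ner",
    aggregation_strategy="simple",  # merge word pieces into whole entity spans
)
print(ner("My name is Clara and I live in Berkeley, California."))
```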
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0855 | 1.0 | 1756 | 0.0632 | 0.9152 | 0.9387 | 0.9268 | 0.9833 |
| 0.0387 | 2.0 | 3512 | 0.0589 | 0.9322 | 0.9505 | 0.9413 | 0.9859 |
| 0.0193 | 3.0 | 5268 | 0.0629 | 0.9358 | 0.9510 | 0.9433 | 0.9864 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
annahaz/xlm-roberta-base-finetuned-misogyny-sexism-en-it-hi-beng | a66d2d8fdf6083e5f3f299925e11216f0ffb4238 | 2022-06-30T03:31:29.000Z | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
] | text-classification | false | annahaz | null | annahaz/xlm-roberta-base-finetuned-misogyny-sexism-en-it-hi-beng | 5 | null | transformers | 17,492 | ---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
- precision
- recall
model-index:
- name: xlm-roberta-base-finetuned-misogyny-sexism-en-it-hi-beng
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-misogyny-sexism-en-it-hi-beng
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0295
- Accuracy: 0.9924
- F1: 0.9922
- Precision: 0.9845
- Recall: 1.0
- Mae: 0.0076
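Note that for a binary classifier with 0/1 labels the reported Mae is simply the error rate (1 - accuracy), which is consistent with the numbers above. A toy sketch of how these metrics relate (the labels below are invented):
```python
from sklearn.metrics import (accuracy_score, f1_score, mean_absolute_error,
                             precision_score, recall_score)

y_true = [1, 0, 1, 1, 0]  # toy gold labels (1 = misogynistic/sexist)
y_pred = [1, 0, 1, 0, 0]  # toy model predictions

print("accuracy :", accuracy_score(y_true, y_pred))
print("f1       :", f1_score(y_true, y_pred))
print("precision:", precision_score(y_true, y_pred))
print("recall   :", recall_score(y_true, y_pred))
print("mae      :", mean_absolute_error(y_true, y_pred))  # equals 1 - accuracy for 0/1 labels
```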
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | Mae |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:---------:|:------:|:------:|
| 0.3723 | 1.0 | 2778 | 0.4446 | 0.7876 | 0.7967 | 0.7375 | 0.8663 | 0.2124 |
| 0.3257 | 2.0 | 5556 | 0.4372 | 0.8381 | 0.8509 | 0.7634 | 0.9611 | 0.1619 |
| 0.2903 | 3.0 | 8334 | 0.2384 | 0.9044 | 0.9055 | 0.8627 | 0.9526 | 0.0956 |
| 0.244 | 4.0 | 11112 | 0.1500 | 0.9514 | 0.9509 | 0.9245 | 0.9789 | 0.0486 |
| 0.2169 | 5.0 | 13890 | 0.1024 | 0.9717 | 0.9709 | 0.9580 | 0.9842 | 0.0283 |
| 0.1987 | 6.0 | 16668 | 0.0879 | 0.9767 | 0.9762 | 0.9612 | 0.9916 | 0.0233 |
| 0.1659 | 7.0 | 19446 | 0.0557 | 0.9848 | 0.9843 | 0.9812 | 0.9874 | 0.0152 |
| 0.1593 | 8.0 | 22224 | 0.0397 | 0.9894 | 0.9891 | 0.9794 | 0.9989 | 0.0106 |
| 0.1384 | 9.0 | 25002 | 0.0315 | 0.9924 | 0.9922 | 0.9855 | 0.9989 | 0.0076 |
| 0.1186 | 10.0 | 27780 | 0.0295 | 0.9924 | 0.9922 | 0.9845 | 1.0 | 0.0076 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.9.0+cu111
- Datasets 2.3.2
- Tokenizers 0.12.1
|
clevrly/roberta-base-finetuned-hotpot_qa | 9b02511fdee958affb3986dba1ea47425901b364 | 2022-07-01T18:12:44.000Z | [
"pytorch",
"tensorboard",
"roberta",
"question-answering",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] | question-answering | false | clevrly | null | clevrly/roberta-base-finetuned-hotpot_qa | 5 | null | transformers | 17,493 | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: roberta-base-finetuned-hotpot_qa
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-hotpot_qa
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8677
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.6588 | 1.0 | 882 | 0.9653 |
| 0.7777 | 2.0 | 1764 | 0.8677 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
fujiki/gpt-neo-en2ja-1.3b | e7203da00f6e63ad08ef470d4a0a87845429a185 | 2022-06-30T08:26:27.000Z | [
"pytorch",
"gpt_neo",
"text-generation",
"transformers",
"license:afl-3.0"
] | text-generation | false | fujiki | null | fujiki/gpt-neo-en2ja-1.3b | 5 | null | transformers | 17,494 | ---
license: afl-3.0
---
|
pserna/mt5-small-spanish-paraphraser | 5c3ee9be30a3b973f6e07b2edad6896ff188ea2b | 2022-06-30T16:33:25.000Z | [
"pytorch",
"tf",
"mt5",
"text2text-generation",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | text2text-generation | false | pserna | null | pserna/mt5-small-spanish-paraphraser | 5 | null | transformers | 17,495 | ---
license: apache-2.0
---
# mT5-small-based Spanish paraphraser
### Original model
- [Google's mT5](https://huggingface.co/google/mt5-small)
### Datasets used for training:
- Spanish [PAWS-X](https://huggingface.co/datasets/paws-x)
- Custom database: "Poor-man's" translation of [duplicated questions in Quora](https://huggingface.co/datasets/quora) (translated with [Helsinki-NLP/opus-mt-en-es](https://huggingface.co/Helsinki-NLP/opus-mt-en-es))
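### Example usage
A minimal sketch (the input sentence and generation settings are illustrative; the card does not document whether a task prefix is required):
```python
from transformers import pipeline

paraphraser = pipeline(
    "text2text-generation",
    model="pserna/mt5-small-spanish-paraphraser",
)
print(paraphraser("¿Cómo puedo aprender a programar en Python?", max_new_tokens=40))
```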
|
z-dickson/US_politicians_covid_skepticism | 20fa6d364476db90c1a55a86af2d6b2c222cb29b | 2022-07-08T13:13:11.000Z | [
"pytorch",
"tf",
"roberta",
"text-classification",
"transformers",
"generated_from_keras_callback",
"model-index"
] | text-classification | false | z-dickson | null | z-dickson/US_politicians_covid_skepticism | 5 | null | transformers | 17,496 | ---
tags:
- generated_from_keras_callback
model-index:
- name: US_politicians_covid_skepticism
results: []
---
# US_politicians_covid_skepticism
This model is a fine-tuned version of [vinai/bertweet-covid19-base-uncased](https://huggingface.co/vinai/bertweet-covid19-base-uncased) on a dataset of 20,000 hand-coded tweets about COVID-19 policies sent by US legislators. The model is trained to identify tweets that either support COVID-19 policies (masks, social distancing, lockdowns, vaccine mandates) or oppose such policies. Before training, all URLs and @usernames were removed from the tweets. Accuracy is likely very high because US legislators tweet many of the same messages and frequently retweet one another. The model is uncased.
It achieves the following results on the evaluation set:
- Train Loss: 0.0141
- Train Sparse Categorical Accuracy: 0.9968
- Validation Loss: 0.0115
- Validation Sparse Categorical Accuracy: 0.9970
- Epoch: 2
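As an illustrative sketch of applying the model with the same preprocessing described above (the regular expressions, the example tweet, and the meaning of the returned labels are assumptions, since the card does not document the label mapping):
```python
import re

from transformers import pipeline

def clean_tweet(text: str) -> str:
    # Mirror the preprocessing described above: drop URLs and @usernames.
    text = re.sub(r"https?://\S+", "", text)
    text = re.sub(r"@\w+", "", text)
    return " ".join(text.split())

clf = pipeline("text-classification", model="z-dickson/US_politicians_covid_skepticism")
tweet = "Wear a mask and get vaccinated! @someuser https://example.com"
print(clf(clean_tweet(tweet)))
```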
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'learning_rate': 5e-07, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
- training_precision: float32
### Training results
| Train Loss | Train Sparse Categorical Accuracy | Validation Loss | Validation Sparse Categorical Accuracy | Epoch |
|:----------:|:---------------------------------:|:---------------:|:--------------------------------------:|:-----:|
| 0.1240 | 0.9721 | 0.0206 | 0.9957 | 0 |
| 0.0194 | 0.9957 | 0.0117 | 0.9972 | 1 |
| 0.0141 | 0.9968 | 0.0115 | 0.9970 | 2 |
### Framework versions
- Transformers 4.20.1
- TensorFlow 2.8.2
- Datasets 2.3.2
- Tokenizers 0.12.1
|
annahaz/distilbert-base-multilingual-cased-finetuned-misogyny | 8f5eba6282e6ae1a1d1df3aa23c0860045858ffb | 2022-07-05T18:52:33.000Z | [
"pytorch",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | annahaz | null | annahaz/distilbert-base-multilingual-cased-finetuned-misogyny | 5 | null | transformers | 17,497 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
- precision
- recall
model-index:
- name: distilbert-base-multilingual-cased-finetuned-misogyny
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-multilingual-cased-finetuned-misogyny
This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0045
- Accuracy: 0.9990
- F1: 0.9989
- Precision: 0.9989
- Recall: 0.9989
- Mae: 0.0010
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | Mae |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:---------:|:------:|:------:|
| 0.2987 | 1.0 | 1759 | 0.3910 | 0.8164 | 0.8186 | 0.7793 | 0.8621 | 0.1836 |
| 0.2507 | 2.0 | 3518 | 0.2399 | 0.9029 | 0.9043 | 0.8589 | 0.9547 | 0.0971 |
| 0.1793 | 3.0 | 5277 | 0.1412 | 0.9479 | 0.9483 | 0.9068 | 0.9937 | 0.0521 |
| 0.1062 | 4.0 | 7036 | 0.0570 | 0.9828 | 0.9823 | 0.9702 | 0.9947 | 0.0172 |
| 0.0732 | 5.0 | 8795 | 0.0293 | 0.9924 | 0.9921 | 0.9885 | 0.9958 | 0.0076 |
| 0.0461 | 6.0 | 10554 | 0.0157 | 0.9960 | 0.9958 | 0.9937 | 0.9979 | 0.0040 |
| 0.037 | 7.0 | 12313 | 0.0126 | 0.9975 | 0.9974 | 0.9948 | 1.0 | 0.0025 |
| 0.0311 | 8.0 | 14072 | 0.0092 | 0.9980 | 0.9979 | 0.9958 | 1.0 | 0.0020 |
| 0.0141 | 9.0 | 15831 | 0.0065 | 0.9985 | 0.9984 | 0.9979 | 0.9989 | 0.0015 |
| 0.0119 | 10.0 | 17590 | 0.0045 | 0.9990 | 0.9989 | 0.9989 | 0.9989 | 0.0010 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.9.0+cu111
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Evelyn18/distilbert-base-uncased-becas-5 | 1a5aa9285b6e15fb34d9bc1263466e7a26e007db | 2022-07-02T03:30:01.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:becasv2",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | Evelyn18 | null | Evelyn18/distilbert-base-uncased-becas-5 | 5 | null | transformers | 17,498 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- becasv2
model-index:
- name: distilbert-base-uncased-becas-5
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-becas-5
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset.
It achieves the following results on the evaluation set:
- Loss: 4.8805
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 5 | 5.4344 |
| No log | 2.0 | 10 | 4.9002 |
| No log | 3.0 | 15 | 4.3601 |
| No log | 4.0 | 20 | 4.4784 |
| No log | 5.0 | 25 | 4.3712 |
| No log | 6.0 | 30 | 4.3958 |
| No log | 7.0 | 35 | 4.8476 |
| No log | 8.0 | 40 | 4.6108 |
| No log | 9.0 | 45 | 4.7711 |
| No log | 10.0 | 50 | 4.8805 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
saekomdalkom/long-t5-local-base-finetuned | 02ee0ec64536637d9a03ccc46681538817c983cf | 2022-07-08T18:48:45.000Z | [
"pytorch",
"longt5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | saekomdalkom | null | saekomdalkom/long-t5-local-base-finetuned | 5 | null | transformers | 17,499 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: long-t5-local-base-finetuned
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# long-t5-local-base-finetuned
This model is a fine-tuned version of [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 9.2722
- Rouge1: 3.8848
- Rouge2: 0.5914
- Rougel: 3.5038
- Rougelsum: 3.7022
- Gen Len: 19.0
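The ROUGE scores and the generation length of about 19 tokens suggest a summarization-style sequence-to-sequence task, although the dataset is not documented. A minimal, purely illustrative sketch (the input text is a placeholder):
```python
from transformers import pipeline

generator = pipeline(
    "text2text-generation",
    model="saekomdalkom/long-t5-local-base-finetuned",
)
document = "Long input text to be condensed goes here. " * 20  # placeholder input
print(generator(document, max_new_tokens=32))
```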
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-06
- train_batch_size: 3
- eval_batch_size: 3
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50000
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:|
| No log | 0.16 | 100 | 342.4395 | 0.0 | 0.0 | 0.0 | 0.0 | 19.0 |
| No log | 0.31 | 200 | 323.6985 | 0.0 | 0.0 | 0.0 | 0.0 | 19.0 |
| No log | 0.47 | 300 | 303.8767 | 0.0 | 0.0 | 0.0 | 0.0 | 19.0 |
| No log | 0.62 | 400 | 284.7559 | 0.0 | 0.0 | 0.0 | 0.0 | 19.0 |
| 295.8376 | 0.78 | 500 | 263.0420 | 0.0 | 0.0 | 0.0 | 0.0 | 19.0 |
| 295.8376 | 0.93 | 600 | 243.2220 | 0.0242 | 0.0 | 0.0223 | 0.0242 | 19.0 |
| 295.8376 | 1.09 | 700 | 224.4514 | 0.0493 | 0.0 | 0.0507 | 0.0513 | 19.0 |
| 295.8376 | 1.24 | 800 | 203.9065 | 0.0656 | 0.0 | 0.0634 | 0.0658 | 19.0 |
| 295.8376 | 1.4 | 900 | 184.8686 | 0.0609 | 0.0 | 0.058 | 0.0616 | 19.0 |
| 199.938 | 1.55 | 1000 | 167.5315 | 0.0638 | 0.0 | 0.0626 | 0.063 | 19.0 |
| 199.938 | 1.71 | 1100 | 151.2369 | 0.0421 | 0.0 | 0.0411 | 0.0413 | 19.0 |
| 199.938 | 1.86 | 1200 | 137.2366 | 0.0358 | 0.0 | 0.0346 | 0.0342 | 19.0 |
| 199.938 | 2.02 | 1300 | 125.3076 | 0.0173 | 0.0 | 0.0157 | 0.0157 | 19.0 |
| 199.938 | 2.17 | 1400 | 114.5600 | 0.0173 | 0.0 | 0.0157 | 0.0157 | 19.0 |
| 136.1309 | 2.33 | 1500 | 105.9237 | 0.0361 | 0.0 | 0.0344 | 0.0363 | 19.0 |
| 136.1309 | 2.48 | 1600 | 97.4123 | 0.0526 | 0.0 | 0.051 | 0.054 | 19.0 |
| 136.1309 | 2.64 | 1700 | 89.0873 | 0.0427 | 0.0 | 0.0407 | 0.0418 | 19.0 |
| 136.1309 | 2.79 | 1800 | 82.0562 | 0.0496 | 0.0 | 0.0462 | 0.0462 | 19.0 |
| 136.1309 | 2.95 | 1900 | 76.2360 | 0.0361 | 0.0 | 0.0345 | 0.0363 | 19.0 |
| 99.2229 | 3.1 | 2000 | 70.0604 | 0.0438 | 0.0 | 0.0425 | 0.0439 | 19.0 |
| 99.2229 | 3.26 | 2100 | 65.1038 | 0.0454 | 0.0 | 0.0441 | 0.0447 | 19.0 |
| 99.2229 | 3.41 | 2200 | 59.1831 | 0.0344 | 0.0 | 0.0318 | 0.0318 | 19.0 |
| 99.2229 | 3.57 | 2300 | 53.0313 | 0.0471 | 0.0 | 0.0448 | 0.0454 | 19.0 |
| 99.2229 | 3.72 | 2400 | 48.2110 | 0.0369 | 0.0 | 0.0369 | 0.0369 | 19.0 |
| 73.4208 | 3.88 | 2500 | 44.2004 | 0.0425 | 0.0 | 0.0427 | 0.044 | 19.0 |
| 73.4208 | 4.03 | 2600 | 40.1925 | 0.0632 | 0.0 | 0.0619 | 0.0612 | 19.0 |
| 73.4208 | 4.19 | 2700 | 36.3698 | 0.0887 | 0.0 | 0.0873 | 0.086 | 19.0 |
| 73.4208 | 4.34 | 2800 | 33.2154 | 0.164 | 0.0 | 0.1652 | 0.1705 | 19.0 |
| 73.4208 | 4.5 | 2900 | 30.9366 | 0.1106 | 0.0 | 0.1138 | 0.1144 | 19.0 |
| 55.6661 | 4.65 | 3000 | 28.5672 | 0.1289 | 0.0 | 0.1295 | 0.131 | 19.0 |
| 55.6661 | 4.81 | 3100 | 27.0910 | 0.2501 | 0.0 | 0.2514 | 0.2527 | 19.0 |
| 55.6661 | 4.96 | 3200 | 25.6666 | 0.318 | 0.0 | 0.3322 | 0.3203 | 19.0 |
| 55.6661 | 5.12 | 3300 | 24.6176 | 0.6319 | 0.0 | 0.6419 | 0.6299 | 19.0 |
| 55.6661 | 5.27 | 3400 | 23.6474 | 1.6632 | 0.0033 | 1.665 | 1.6244 | 19.0 |
| 45.1105 | 5.43 | 3500 | 22.7063 | 3.1374 | 0.0 | 3.1331 | 3.1333 | 19.0 |
| 45.1105 | 5.58 | 3600 | 21.9191 | 5.0757 | 0.0 | 5.0694 | 5.0456 | 19.0 |
| 45.1105 | 5.74 | 3700 | 21.3359 | 5.6576 | 0.0 | 5.689 | 5.6772 | 19.0 |
| 45.1105 | 5.89 | 3800 | 20.6990 | 5.828 | 0.0 | 5.8801 | 5.8688 | 19.0 |
| 45.1105 | 6.05 | 3900 | 20.1800 | 6.3727 | 0.0 | 6.3801 | 6.3716 | 19.0 |
| 39.6923 | 6.2 | 4000 | 19.7415 | 6.2209 | 0.0 | 6.2347 | 6.2368 | 19.0 |
| 39.6923 | 6.36 | 4100 | 19.2800 | 5.7215 | 0.0 | 5.7452 | 5.7295 | 19.0 |
| 39.6923 | 6.51 | 4200 | 18.9683 | 6.1018 | 0.0062 | 6.1 | 6.0935 | 19.0 |
| 39.6923 | 6.67 | 4300 | 18.5776 | 6.0354 | 0.0062 | 6.0227 | 6.0103 | 19.0 |
| 39.6923 | 6.82 | 4400 | 18.2629 | 5.4438 | 0.0062 | 5.441 | 5.4629 | 19.0 |
| 36.1688 | 6.98 | 4500 | 18.0268 | 5.3214 | 0.0091 | 5.3093 | 5.2992 | 19.0 |
| 36.1688 | 7.13 | 4600 | 17.7740 | 5.2223 | 0.0123 | 5.2132 | 5.2084 | 19.0 |
| 36.1688 | 7.29 | 4700 | 17.5345 | 5.178 | 0.0231 | 5.1615 | 5.1243 | 19.0 |
| 36.1688 | 7.44 | 4800 | 17.3846 | 5.3899 | 0.0277 | 5.3414 | 5.3534 | 19.0 |
| 36.1688 | 7.6 | 4900 | 17.1999 | 5.315 | 0.0272 | 5.2572 | 5.2477 | 19.0 |
| 33.5745 | 7.75 | 5000 | 17.0078 | 5.9014 | 0.028 | 5.8181 | 5.8058 | 19.0 |
| 33.5745 | 7.91 | 5100 | 16.6418 | 5.7546 | 0.0242 | 5.6903 | 5.6746 | 19.0 |
| 33.5745 | 8.06 | 5200 | 16.6330 | 6.6893 | 0.0182 | 6.6354 | 6.6178 | 19.0 |
| 33.5745 | 8.22 | 5300 | 16.3423 | 6.1679 | 0.0072 | 6.1518 | 6.128 | 19.0 |
| 33.5745 | 8.37 | 5400 | 16.2373 | 6.7659 | 0.0139 | 6.7271 | 6.7076 | 19.0 |
| 31.9486 | 8.53 | 5500 | 16.1523 | 7.1991 | 0.0139 | 7.1674 | 7.1283 | 19.0 |
| 31.9486 | 8.68 | 5600 | 16.0607 | 7.7042 | 0.0169 | 7.6741 | 7.6537 | 19.0 |
| 31.9486 | 8.84 | 5700 | 15.7647 | 7.1238 | 0.02 | 7.1113 | 7.0586 | 19.0 |
| 31.9486 | 8.99 | 5800 | 15.6194 | 7.3055 | 0.0116 | 7.3311 | 7.2683 | 19.0 |
| 31.9486 | 9.15 | 5900 | 15.4994 | 7.3365 | 0.0139 | 7.3026 | 7.2708 | 19.0 |
| 30.5224 | 9.3 | 6000 | 15.4207 | 8.1959 | 0.0116 | 8.1917 | 8.1651 | 19.0 |
| 30.5224 | 9.46 | 6100 | 15.2981 | 7.7936 | 0.0144 | 7.7826 | 7.7488 | 19.0 |
| 30.5224 | 9.61 | 6200 | 15.2391 | 7.95 | 0.0144 | 7.9371 | 7.895 | 19.0 |
| 30.5224 | 9.77 | 6300 | 15.0941 | 7.1669 | 0.0144 | 7.146 | 7.1251 | 19.0 |
| 30.5224 | 9.92 | 6400 | 14.9979 | 6.2157 | 0.0076 | 6.2086 | 6.1774 | 19.0 |
| 29.1236 | 10.08 | 6500 | 14.9523 | 7.4422 | 0.0137 | 7.3929 | 7.393 | 19.0 |
| 29.1236 | 10.23 | 6600 | 14.9515 | 7.2375 | 0.0137 | 7.1728 | 7.1779 | 19.0 |
| 29.1236 | 10.39 | 6700 | 14.8874 | 7.5071 | 0.0068 | 7.4544 | 7.4739 | 19.0 |
| 29.1236 | 10.54 | 6800 | 14.8057 | 5.9608 | 0.0169 | 5.8754 | 5.8691 | 19.0 |
| 29.1236 | 10.7 | 6900 | 14.6818 | 5.6345 | 0.021 | 5.5422 | 5.5331 | 19.0 |
| 28.314 | 10.85 | 7000 | 14.5409 | 5.5799 | 0.0169 | 5.4915 | 5.4833 | 19.0 |
| 28.314 | 11.01 | 7100 | 14.4512 | 4.3498 | 0.0368 | 4.2243 | 4.2193 | 19.0 |
| 28.314 | 11.16 | 7200 | 14.4560 | 4.0453 | 0.0372 | 3.9481 | 3.9228 | 19.0 |
| 28.314 | 11.32 | 7300 | 14.3851 | 5.1332 | 0.0426 | 5.0186 | 4.9882 | 19.0 |
| 28.314 | 11.47 | 7400 | 14.2265 | 4.8944 | 0.0371 | 4.7869 | 4.7765 | 19.0 |
| 27.5349 | 11.63 | 7500 | 14.1214 | 3.8846 | 0.0335 | 3.7882 | 3.7677 | 19.0 |
| 27.5349 | 11.78 | 7600 | 14.1505 | 3.9992 | 0.0514 | 3.883 | 3.8385 | 19.0 |
| 27.5349 | 11.94 | 7700 | 13.9923 | 3.4526 | 0.0664 | 3.325 | 3.3258 | 19.0 |
| 27.5349 | 12.09 | 7800 | 14.0299 | 2.3086 | 0.0346 | 2.25 | 2.219 | 19.0 |
| 27.5349 | 12.25 | 7900 | 13.9814 | 2.4402 | 0.0628 | 2.3282 | 2.3004 | 19.0 |
| 26.4286 | 12.4 | 8000 | 13.8561 | 2.9869 | 0.0654 | 2.8769 | 2.8485 | 19.0 |
| 26.4286 | 12.56 | 8100 | 13.8259 | 1.9609 | 0.0386 | 1.8863 | 1.8846 | 19.0 |
| 26.4286 | 12.71 | 8200 | 13.8127 | 2.0628 | 0.0355 | 1.9915 | 1.9738 | 19.0 |
| 26.4286 | 12.87 | 8300 | 13.7174 | 1.9904 | 0.081 | 1.888 | 1.9069 | 19.0 |
| 26.4286 | 13.02 | 8400 | 13.6308 | 2.1398 | 0.1055 | 2.0204 | 2.0468 | 19.0 |
| 26.108 | 13.18 | 8500 | 13.6490 | 1.8934 | 0.0788 | 1.7942 | 1.8188 | 19.0 |
| 26.108 | 13.33 | 8600 | 13.5996 | 1.8746 | 0.0901 | 1.7441 | 1.8006 | 19.0 |
| 26.108 | 13.49 | 8700 | 13.5394 | 1.7846 | 0.0895 | 1.6648 | 1.7331 | 19.0 |
| 26.108 | 13.64 | 8800 | 13.5368 | 2.1345 | 0.1287 | 1.9808 | 2.0814 | 19.0 |
| 26.108 | 13.8 | 8900 | 13.4793 | 2.5234 | 0.1611 | 2.3289 | 2.4292 | 19.0 |
| 25.4931 | 13.95 | 9000 | 13.3633 | 2.8056 | 0.1953 | 2.5619 | 2.7088 | 19.0 |
| 25.4931 | 14.11 | 9100 | 13.5182 | 3.087 | 0.2192 | 2.8182 | 2.9928 | 19.0 |
| 25.4931 | 14.26 | 9200 | 13.3372 | 2.6353 | 0.175 | 2.4145 | 2.589 | 19.0 |
| 25.4931 | 14.42 | 9300 | 13.2822 | 2.7577 | 0.1905 | 2.5277 | 2.7215 | 19.0 |
| 25.4931 | 14.57 | 9400 | 13.2011 | 3.1891 | 0.2381 | 2.9276 | 3.142 | 19.0 |
| 24.9241 | 14.73 | 9500 | 13.2201 | 2.609 | 0.1683 | 2.4162 | 2.5905 | 19.0 |
| 24.9241 | 14.88 | 9600 | 13.2206 | 3.1083 | 0.2241 | 2.8627 | 3.0606 | 19.0 |
| 24.9241 | 15.04 | 9700 | 13.2157 | 3.6233 | 0.2731 | 3.338 | 3.5642 | 19.0 |
| 24.9241 | 15.19 | 9800 | 13.1195 | 3.1785 | 0.2318 | 2.9449 | 3.1306 | 19.0 |
| 24.9241 | 15.35 | 9900 | 13.0481 | 3.0249 | 0.2192 | 2.7991 | 2.9925 | 19.0 |
| 24.4511 | 15.5 | 10000 | 13.0693 | 3.1189 | 0.2287 | 2.8726 | 3.0669 | 19.0 |
| 24.4511 | 15.66 | 10100 | 12.9204 | 2.6405 | 0.1899 | 2.4337 | 2.61 | 19.0 |
| 24.4511 | 15.81 | 10200 | 12.9200 | 2.9037 | 0.2148 | 2.6775 | 2.8683 | 19.0 |
| 24.4511 | 15.97 | 10300 | 12.9203 | 2.8847 | 0.2034 | 2.6586 | 2.8438 | 19.0 |
| 24.4511 | 16.12 | 10400 | 12.8723 | 2.8195 | 0.1976 | 2.5922 | 2.7803 | 19.0 |
| 23.8949 | 16.28 | 10500 | 12.9749 | 3.2658 | 0.2217 | 2.9905 | 3.2262 | 19.0 |
| 23.8949 | 16.43 | 10600 | 12.7975 | 2.9762 | 0.1844 | 2.7295 | 2.9474 | 19.0 |
| 23.8949 | 16.59 | 10700 | 12.7497 | 2.5496 | 0.1406 | 2.3536 | 2.5269 | 19.0 |
| 23.8949 | 16.74 | 10800 | 12.6485 | 2.5509 | 0.1454 | 2.343 | 2.5182 | 19.0 |
| 23.8949 | 16.9 | 10900 | 12.6574 | 2.1914 | 0.1281 | 2.0113 | 2.1574 | 19.0 |
| 23.4963 | 17.05 | 11000 | 12.6919 | 2.1748 | 0.1299 | 1.9909 | 2.1229 | 19.0 |
| 23.4963 | 17.21 | 11100 | 12.5660 | 2.3751 | 0.1177 | 2.1417 | 2.326 | 19.0 |
| 23.4963 | 17.36 | 11200 | 12.5866 | 2.6893 | 0.1344 | 2.4378 | 2.6318 | 19.0 |
| 23.4963 | 17.52 | 11300 | 12.5427 | 2.5546 | 0.1411 | 2.3175 | 2.5073 | 19.0 |
| 23.4963 | 17.67 | 11400 | 12.5011 | 2.347 | 0.1223 | 2.1322 | 2.3077 | 19.0 |
| 23.1492 | 17.83 | 11500 | 12.5168 | 2.2304 | 0.1141 | 2.0657 | 2.1951 | 19.0 |
| 23.1492 | 17.98 | 11600 | 12.4043 | 2.4485 | 0.1209 | 2.2548 | 2.4114 | 19.0 |
| 23.1492 | 18.14 | 11700 | 12.4192 | 2.0551 | 0.0887 | 1.8996 | 2.0199 | 19.0 |
| 23.1492 | 18.29 | 11800 | 12.3799 | 2.1076 | 0.0932 | 1.9464 | 2.0589 | 19.0 |
| 23.1492 | 18.45 | 11900 | 12.4263 | 2.4136 | 0.1152 | 2.2172 | 2.357 | 19.0 |
| 22.7005 | 18.6 | 12000 | 12.3218 | 2.1197 | 0.1105 | 1.9997 | 2.0873 | 19.0 |
| 22.7005 | 18.76 | 12100 | 12.3297 | 2.1883 | 0.1102 | 2.0414 | 2.1267 | 19.0 |
| 22.7005 | 18.91 | 12200 | 12.3026 | 1.966 | 0.0954 | 1.8387 | 1.9469 | 19.0 |
| 22.7005 | 19.07 | 12300 | 12.3030 | 2.0179 | 0.0955 | 1.8834 | 1.9858 | 19.0 |
| 22.7005 | 19.22 | 12400 | 12.2478 | 1.9549 | 0.0948 | 1.8437 | 1.9092 | 19.0 |
| 22.3178 | 19.38 | 12500 | 12.1803 | 1.6396 | 0.0648 | 1.5296 | 1.6208 | 19.0 |
| 22.3178 | 19.53 | 12600 | 12.1732 | 1.5568 | 0.0769 | 1.4894 | 1.5387 | 19.0 |
| 22.3178 | 19.69 | 12700 | 12.1342 | 1.6861 | 0.0782 | 1.6105 | 1.666 | 19.0 |
| 22.3178 | 19.84 | 12800 | 12.1313 | 2.023 | 0.0965 | 1.9295 | 2.0072 | 19.0 |
| 22.3178 | 20.0 | 12900 | 12.1315 | 1.5878 | 0.0701 | 1.5153 | 1.5467 | 19.0 |
| 21.8344 | 20.16 | 13000 | 12.0611 | 1.6406 | 0.0637 | 1.5665 | 1.6033 | 19.0 |
| 21.8344 | 20.31 | 13100 | 12.0327 | 1.5913 | 0.0544 | 1.5209 | 1.552 | 19.0 |
| 21.8344 | 20.47 | 13200 | 12.0466 | 1.3618 | 0.0494 | 1.3186 | 1.33 | 19.0 |
| 21.8344 | 20.62 | 13300 | 12.0787 | 1.4445 | 0.0451 | 1.4073 | 1.41 | 19.0 |
| 21.8344 | 20.78 | 13400 | 11.9829 | 1.3465 | 0.0494 | 1.3247 | 1.3167 | 19.0 |
| 21.6309 | 20.93 | 13500 | 11.9072 | 1.4165 | 0.0519 | 1.3761 | 1.3839 | 19.0 |
| 21.6309 | 21.09 | 13600 | 11.9261 | 1.3969 | 0.0502 | 1.3606 | 1.3618 | 19.0 |
| 21.6309 | 21.24 | 13700 | 11.8313 | 1.3337 | 0.0337 | 1.2974 | 1.316 | 19.0 |
| 21.6309 | 21.4 | 13800 | 11.7709 | 1.3045 | 0.0371 | 1.2746 | 1.2889 | 19.0 |
| 21.6309 | 21.55 | 13900 | 11.8402 | 1.6106 | 0.0391 | 1.5678 | 1.5697 | 19.0 |
| 21.2262 | 21.71 | 14000 | 11.7132 | 1.3261 | 0.0222 | 1.296 | 1.3051 | 19.0 |
| 21.2262 | 21.86 | 14100 | 11.7206 | 1.41 | 0.0252 | 1.374 | 1.3985 | 19.0 |
| 21.2262 | 22.02 | 14200 | 11.7033 | 1.6231 | 0.0478 | 1.5632 | 1.5851 | 19.0 |
| 21.2262 | 22.17 | 14300 | 11.7385 | 1.8974 | 0.0618 | 1.8339 | 1.8583 | 19.0 |
| 21.2262 | 22.33 | 14400 | 11.6519 | 1.8998 | 0.0541 | 1.8285 | 1.8552 | 19.0 |
| 20.8055 | 22.48 | 14500 | 11.6039 | 1.9561 | 0.0582 | 1.859 | 1.9073 | 19.0 |
| 20.8055 | 22.64 | 14600 | 11.6322 | 1.7731 | 0.0442 | 1.7061 | 1.7303 | 19.0 |
| 20.8055 | 22.79 | 14700 | 11.6046 | 1.8874 | 0.0618 | 1.8083 | 1.8539 | 19.0 |
| 20.8055 | 22.95 | 14800 | 11.5051 | 1.4271 | 0.016 | 1.3996 | 1.4086 | 19.0 |
| 20.8055 | 23.1 | 14900 | 11.5564 | 1.743 | 0.0451 | 1.6787 | 1.727 | 19.0 |
| 20.6263 | 23.26 | 15000 | 11.5024 | 1.9313 | 0.0575 | 1.8357 | 1.887 | 19.0 |
| 20.6263 | 23.41 | 15100 | 11.5281 | 2.082 | 0.0435 | 1.9865 | 2.0327 | 19.0 |
| 20.6263 | 23.57 | 15200 | 11.4223 | 1.9773 | 0.0332 | 1.9038 | 1.9432 | 19.0 |
| 20.6263 | 23.72 | 15300 | 11.4675 | 1.7845 | 0.0831 | 1.6835 | 1.7414 | 19.0 |
| 20.6263 | 23.88 | 15400 | 11.3882 | 2.1183 | 0.0715 | 1.9965 | 2.0725 | 19.0 |
| 20.3154 | 24.03 | 15500 | 11.4197 | 2.4045 | 0.1336 | 2.2302 | 2.3024 | 19.0 |
| 20.3154 | 24.19 | 15600 | 11.3558 | 1.9596 | 0.1196 | 1.8152 | 1.8748 | 19.0 |
| 20.3154 | 24.34 | 15700 | 11.3438 | 2.0931 | 0.111 | 1.9469 | 1.999 | 19.0 |
| 20.3154 | 24.5 | 15800 | 11.3021 | 2.2159 | 0.1257 | 2.0511 | 2.1345 | 19.0 |
| 20.3154 | 24.65 | 15900 | 11.3178 | 2.093 | 0.132 | 1.9083 | 1.9969 | 19.0 |
| 20.0858 | 24.81 | 16000 | 11.2377 | 1.6589 | 0.1129 | 1.5625 | 1.6245 | 19.0 |
| 20.0858 | 24.96 | 16100 | 11.2058 | 1.6667 | 0.0854 | 1.5597 | 1.6223 | 19.0 |
| 20.0858 | 25.12 | 16200 | 11.1602 | 2.0907 | 0.1219 | 1.9297 | 1.9988 | 19.0 |
| 20.0858 | 25.27 | 16300 | 11.1666 | 1.86 | 0.1092 | 1.7398 | 1.7993 | 19.0 |
| 20.0858 | 25.43 | 16400 | 11.1807 | 1.8879 | 0.1818 | 1.7579 | 1.8335 | 19.0 |
| 19.7588 | 25.58 | 16500 | 11.1310 | 2.0377 | 0.1612 | 1.8653 | 1.9538 | 19.0 |
| 19.7588 | 25.74 | 16600 | 11.1577 | 2.1441 | 0.1767 | 1.9546 | 2.0518 | 19.0 |
| 19.7588 | 25.89 | 16700 | 11.0748 | 1.8679 | 0.1892 | 1.7249 | 1.7822 | 19.0 |
| 19.7588 | 26.05 | 16800 | 11.1048 | 2.2775 | 0.2072 | 2.0566 | 2.1521 | 19.0 |
| 19.7588 | 26.2 | 16900 | 11.0498 | 1.8117 | 0.161 | 1.6879 | 1.7357 | 19.0 |
| 19.4627 | 26.36 | 17000 | 11.0435 | 1.7875 | 0.1627 | 1.6626 | 1.7306 | 19.0 |
| 19.4627 | 26.51 | 17100 | 10.9406 | 1.7333 | 0.1645 | 1.6051 | 1.6671 | 19.0 |
| 19.4627 | 26.67 | 17200 | 10.9242 | 1.596 | 0.1426 | 1.4747 | 1.5341 | 19.0 |
| 19.4627 | 26.82 | 17300 | 10.9571 | 1.9874 | 0.2109 | 1.8109 | 1.9061 | 19.0 |
| 19.4627 | 26.98 | 17400 | 10.9265 | 1.6999 | 0.1353 | 1.5574 | 1.6402 | 19.0 |
| 19.2619 | 27.13 | 17500 | 10.8919 | 1.7543 | 0.1709 | 1.587 | 1.6605 | 19.0 |
| 19.2619 | 27.29 | 17600 | 10.8382 | 2.126 | 0.2056 | 1.8609 | 2.0021 | 19.0 |
| 19.2619 | 27.44 | 17700 | 10.8936 | 1.9626 | 0.1726 | 1.7402 | 1.8665 | 19.0 |
| 19.2619 | 27.6 | 17800 | 10.8565 | 1.7668 | 0.1673 | 1.5914 | 1.7099 | 19.0 |
| 19.2619 | 27.75 | 17900 | 10.9047 | 2.0972 | 0.1867 | 1.8519 | 2.0224 | 19.0 |
| 19.0457 | 27.91 | 18000 | 10.7900 | 2.7761 | 0.2904 | 2.4403 | 2.6936 | 19.0 |
| 19.0457 | 28.06 | 18100 | 10.7191 | 2.3652 | 0.2431 | 2.0989 | 2.2767 | 19.0 |
| 19.0457 | 28.22 | 18200 | 10.7462 | 3.3125 | 0.361 | 2.847 | 3.1506 | 19.0 |
| 19.0457 | 28.37 | 18300 | 10.7721 | 2.9247 | 0.3 | 2.5443 | 2.806 | 19.0 |
| 19.0457 | 28.53 | 18400 | 10.7208 | 2.5398 | 0.2812 | 2.2211 | 2.4312 | 19.0 |
| 18.8301 | 28.68 | 18500 | 10.6708 | 2.5902 | 0.281 | 2.2765 | 2.4881 | 19.0 |
| 18.8301 | 28.84 | 18600 | 10.7220 | 2.276 | 0.2061 | 1.9904 | 2.1922 | 19.0 |
| 18.8301 | 28.99 | 18700 | 10.6855 | 2.8678 | 0.3496 | 2.52 | 2.751 | 19.0 |
| 18.8301 | 29.15 | 18800 | 10.6550 | 2.5232 | 0.2724 | 2.2108 | 2.4314 | 19.0 |
| 18.8301 | 29.3 | 18900 | 10.6488 | 2.5629 | 0.2203 | 2.2361 | 2.4261 | 19.0 |
| 18.5872 | 29.46 | 19000 | 10.6123 | 2.5052 | 0.1923 | 2.1381 | 2.3821 | 19.0 |
| 18.5872 | 29.61 | 19100 | 10.6105 | 3.7779 | 0.3653 | 3.2404 | 3.5759 | 19.0 |
| 18.5872 | 29.77 | 19200 | 10.5823 | 3.8282 | 0.3743 | 3.2645 | 3.6077 | 19.0 |
| 18.5872 | 29.92 | 19300 | 10.5606 | 3.0976 | 0.277 | 2.6041 | 2.8838 | 19.0 |
| 18.5872 | 30.08 | 19400 | 10.5846 | 3.638 | 0.3482 | 3.0804 | 3.4294 | 19.0 |
| 18.2839 | 30.23 | 19500 | 10.4722 | 2.6173 | 0.2326 | 2.2268 | 2.4656 | 19.0 |
| 18.2839 | 30.39 | 19600 | 10.5211 | 3.5085 | 0.3377 | 2.9751 | 3.2889 | 19.0 |
| 18.2839 | 30.54 | 19700 | 10.4735 | 2.4781 | 0.2097 | 2.1099 | 2.3338 | 19.0 |
| 18.2839 | 30.7 | 19800 | 10.4545 | 3.1459 | 0.3022 | 2.6844 | 2.9559 | 19.0 |
| 18.2839 | 30.85 | 19900 | 10.4525 | 3.6095 | 0.3637 | 3.0873 | 3.3886 | 19.0 |
| 18.1352 | 31.01 | 20000 | 10.4409 | 4.0556 | 0.4621 | 3.3857 | 3.7778 | 19.0 |
| 18.1352 | 31.16 | 20100 | 10.4132 | 3.8346 | 0.3863 | 3.2323 | 3.6266 | 19.0 |
| 18.1352 | 31.32 | 20200 | 10.4468 | 2.3736 | 0.1977 | 2.0195 | 2.236 | 19.0 |
| 18.1352 | 31.47 | 20300 | 10.3896 | 3.6954 | 0.3512 | 3.1402 | 3.4667 | 19.0 |
| 18.1352 | 31.63 | 20400 | 10.3546 | 3.5158 | 0.3558 | 3.0575 | 3.3116 | 19.0 |
| 17.9834 | 31.78 | 20500 | 10.3632 | 3.179 | 0.3374 | 2.7634 | 2.9846 | 19.0 |
| 17.9834 | 31.94 | 20600 | 10.3168 | 3.9121 | 0.4012 | 3.3812 | 3.687 | 19.0 |
| 17.9834 | 32.09 | 20700 | 10.2772 | 3.6148 | 0.3667 | 3.1059 | 3.3541 | 19.0 |
| 17.9834 | 32.25 | 20800 | 10.3173 | 3.1448 | 0.2924 | 2.6948 | 2.9338 | 19.0 |
| 17.9834 | 32.4 | 20900 | 10.2154 | 2.4611 | 0.1922 | 2.1597 | 2.3288 | 19.0 |
| 17.6192 | 32.56 | 21000 | 10.2957 | 3.3177 | 0.3762 | 2.8085 | 3.0595 | 19.0 |
| 17.6192 | 32.71 | 21100 | 10.2064 | 3.4663 | 0.3819 | 3.0229 | 3.2201 | 19.0 |
| 17.6192 | 32.87 | 21200 | 10.2235 | 3.245 | 0.3179 | 2.7618 | 3.0066 | 19.0 |
| 17.6192 | 33.02 | 21300 | 10.2193 | 2.5572 | 0.2775 | 2.216 | 2.3892 | 19.0 |
| 17.6192 | 33.18 | 21400 | 10.2467 | 3.4873 | 0.3934 | 3.02 | 3.2701 | 19.0 |
| 17.5532 | 33.33 | 21500 | 10.2378 | 2.8087 | 0.3049 | 2.4001 | 2.6218 | 19.0 |
| 17.5532 | 33.49 | 21600 | 10.2086 | 3.8967 | 0.4801 | 3.3678 | 3.603 | 19.0 |
| 17.5532 | 33.64 | 21700 | 10.2384 | 2.6534 | 0.3239 | 2.3276 | 2.4692 | 19.0 |
| 17.5532 | 33.8 | 21800 | 10.1929 | 2.6025 | 0.2845 | 2.2653 | 2.4507 | 19.0 |
| 17.5532 | 33.95 | 21900 | 10.1016 | 3.3244 | 0.377 | 2.8311 | 3.0784 | 19.0 |
| 17.3872 | 34.11 | 22000 | 10.1407 | 3.4245 | 0.4024 | 3.044 | 3.1865 | 19.0 |
| 17.3872 | 34.26 | 22100 | 10.0760 | 3.9251 | 0.4272 | 3.4064 | 3.6497 | 19.0 |
| 17.3872 | 34.42 | 22200 | 10.0998 | 3.3034 | 0.3438 | 2.8977 | 3.1141 | 19.0 |
| 17.3872 | 34.57 | 22300 | 10.0834 | 2.4967 | 0.266 | 2.2301 | 2.3647 | 19.0 |
| 17.3872 | 34.73 | 22400 | 9.9902 | 4.0828 | 0.4867 | 3.5482 | 3.7861 | 19.0 |
| 17.1744 | 34.88 | 22500 | 10.0366 | 3.5772 | 0.4377 | 3.1153 | 3.3199 | 19.0 |
| 17.1744 | 35.04 | 22600 | 10.0299 | 3.5342 | 0.433 | 3.0501 | 3.2176 | 19.0 |
| 17.1744 | 35.19 | 22700 | 9.9912 | 3.7754 | 0.4445 | 3.3191 | 3.502 | 19.0 |
| 17.1744 | 35.35 | 22800 | 9.9580 | 4.5086 | 0.5514 | 3.8986 | 4.1987 | 19.0 |
| 17.1744 | 35.5 | 22900 | 9.9676 | 3.526 | 0.3942 | 3.0859 | 3.3082 | 19.0 |
| 17.0687 | 35.66 | 23000 | 9.9874 | 3.7058 | 0.5139 | 3.2353 | 3.4611 | 19.0 |
| 17.0687 | 35.81 | 23100 | 9.9536 | 3.6588 | 0.4552 | 3.1591 | 3.3554 | 19.0 |
| 17.0687 | 35.97 | 23200 | 9.8948 | 3.6279 | 0.3933 | 3.1403 | 3.3426 | 19.0 |
| 17.0687 | 36.12 | 23300 | 9.8397 | 3.8101 | 0.4971 | 3.3152 | 3.5133 | 19.0 |
| 17.0687 | 36.28 | 23400 | 9.8995 | 3.3201 | 0.4209 | 2.9101 | 3.0903 | 19.0 |
| 16.7686 | 36.43 | 23500 | 9.9085 | 4.0108 | 0.6389 | 3.5055 | 3.7286 | 19.0 |
| 16.7686 | 36.59 | 23600 | 9.8688 | 3.6051 | 0.5164 | 3.1651 | 3.3781 | 19.0 |
| 16.7686 | 36.74 | 23700 | 9.8673 | 4.4987 | 0.6051 | 3.8789 | 4.1868 | 19.0 |
| 16.7686 | 36.9 | 23800 | 9.8848 | 3.6926 | 0.5635 | 3.1681 | 3.3902 | 19.0 |
| 16.7686 | 37.05 | 23900 | 9.8497 | 3.518 | 0.4283 | 3.1159 | 3.3112 | 19.0 |
| 16.7432 | 37.21 | 24000 | 9.8044 | 3.3369 | 0.3772 | 2.9784 | 3.147 | 19.0 |
| 16.7432 | 37.36 | 24100 | 9.7768 | 3.5862 | 0.3819 | 3.1273 | 3.3535 | 19.0 |
| 16.7432 | 37.52 | 24200 | 9.7536 | 4.1823 | 0.5884 | 3.645 | 3.8843 | 19.0 |
| 16.7432 | 37.67 | 24300 | 9.7953 | 4.3981 | 0.6441 | 3.7941 | 4.0623 | 19.0 |
| 16.7432 | 37.83 | 24400 | 9.6742 | 3.7833 | 0.4755 | 3.3516 | 3.5543 | 19.0 |
| 16.5714 | 37.98 | 24500 | 9.7946 | 3.3839 | 0.495 | 3.0021 | 3.156 | 19.0 |
| 16.5714 | 38.14 | 24600 | 9.7544 | 4.3873 | 0.6486 | 3.8188 | 4.0653 | 19.0 |
| 16.5714 | 38.29 | 24700 | 9.7586 | 3.4403 | 0.4756 | 3.0402 | 3.2405 | 19.0 |
| 16.5714 | 38.45 | 24800 | 9.7895 | 3.6822 | 0.6247 | 3.2612 | 3.4746 | 19.0 |
| 16.5714 | 38.6 | 24900 | 9.6964 | 3.8743 | 0.6209 | 3.4159 | 3.6051 | 19.0 |
| 16.3393 | 38.76 | 25000 | 9.7190 | 4.1508 | 0.635 | 3.5925 | 3.8753 | 19.0 |
| 16.3393 | 38.91 | 25100 | 9.6435 | 3.6755 | 0.4777 | 3.268 | 3.4572 | 19.0 |
| 16.3393 | 39.07 | 25200 | 9.6390 | 2.9478 | 0.4049 | 2.6531 | 2.7782 | 19.0 |
| 16.3393 | 39.22 | 25300 | 9.6300 | 2.9973 | 0.3897 | 2.6662 | 2.7943 | 19.0 |
| 16.3393 | 39.38 | 25400 | 9.6229 | 3.6726 | 0.4182 | 3.2207 | 3.4595 | 19.0 |
| 16.3076 | 39.53 | 25500 | 9.6392 | 2.9691 | 0.3692 | 2.6709 | 2.8182 | 19.0 |
| 16.3076 | 39.69 | 25600 | 9.5978 | 2.8167 | 0.3437 | 2.593 | 2.7155 | 19.0 |
| 16.3076 | 39.84 | 25700 | 9.6111 | 3.5135 | 0.5453 | 3.1415 | 3.3042 | 19.0 |
| 16.3076 | 40.0 | 25800 | 9.6118 | 3.459 | 0.4963 | 3.1351 | 3.2809 | 19.0 |
| 16.3076 | 40.16 | 25900 | 9.5994 | 3.5735 | 0.539 | 3.2556 | 3.3904 | 19.0 |
| 16.0684 | 40.31 | 26000 | 9.5526 | 3.3388 | 0.4689 | 2.9753 | 3.1562 | 19.0 |
| 16.0684 | 40.47 | 26100 | 9.5365 | 3.0882 | 0.392 | 2.8072 | 2.9556 | 19.0 |
| 16.0684 | 40.62 | 26200 | 9.5571 | 3.0022 | 0.4109 | 2.7108 | 2.8575 | 19.0 |
| 16.0684 | 40.78 | 26300 | 9.5240 | 3.506 | 0.5734 | 3.1577 | 3.3378 | 19.0 |
| 16.0684 | 40.93 | 26400 | 9.4913 | 3.5936 | 0.5165 | 3.2452 | 3.4134 | 19.0 |
| 15.9425 | 41.09 | 26500 | 9.5297 | 3.7802 | 0.6862 | 3.4061 | 3.5436 | 19.0 |
| 15.9425 | 41.24 | 26600 | 9.4657 | 3.8433 | 0.6105 | 3.4621 | 3.638 | 19.0 |
| 15.9425 | 41.4 | 26700 | 9.5049 | 3.5822 | 0.6462 | 3.231 | 3.3745 | 19.0 |
| 15.9425 | 41.55 | 26800 | 9.4739 | 2.9668 | 0.4426 | 2.7345 | 2.8134 | 19.0 |
| 15.9425 | 41.71 | 26900 | 9.4868 | 3.7458 | 0.6934 | 3.3708 | 3.5492 | 19.0 |
| 15.7779 | 41.86 | 27000 | 9.4683 | 3.5254 | 0.6006 | 3.1629 | 3.3011 | 19.0 |
| 15.7779 | 42.02 | 27100 | 9.4108 | 4.2731 | 0.7412 | 3.8236 | 4.0171 | 19.0 |
| 15.7779 | 42.17 | 27200 | 9.3994 | 3.5014 | 0.5738 | 3.1525 | 3.3306 | 19.0 |
| 15.7779 | 42.33 | 27300 | 9.3760 | 3.4929 | 0.4954 | 3.1402 | 3.3028 | 19.0 |
| 15.7779 | 42.48 | 27400 | 9.4201 | 4.2777 | 0.7152 | 3.7943 | 4.0349 | 19.0 |
| 15.7238 | 42.64 | 27500 | 9.3913 | 3.6489 | 0.6371 | 3.2903 | 3.4528 | 19.0 |
| 15.7238 | 42.79 | 27600 | 9.4269 | 3.5269 | 0.6042 | 3.2049 | 3.3528 | 19.0 |
| 15.7238 | 42.95 | 27700 | 9.3847 | 3.4735 | 0.5963 | 3.1522 | 3.2796 | 19.0 |
| 15.7238 | 43.1 | 27800 | 9.3474 | 3.8327 | 0.6428 | 3.406 | 3.5698 | 19.0 |
| 15.7238 | 43.26 | 27900 | 9.3293 | 3.5475 | 0.6313 | 3.1725 | 3.3367 | 19.0 |
| 15.5108 | 43.41 | 28000 | 9.3802 | 4.249 | 0.7997 | 3.7924 | 3.9849 | 19.0 |
| 15.5108 | 43.57 | 28100 | 9.2588 | 3.4476 | 0.4676 | 3.1758 | 3.2993 | 19.0 |
| 15.5108 | 43.72 | 28200 | 9.3447 | 4.0267 | 0.7081 | 3.6208 | 3.7957 | 19.0 |
| 15.5108 | 43.88 | 28300 | 9.2853 | 4.0105 | 0.7799 | 3.5848 | 3.7619 | 19.0 |
| 15.5108 | 44.03 | 28400 | 9.2753 | 3.1833 | 0.4678 | 2.9068 | 3.0168 | 19.0 |
| 15.4004 | 44.19 | 28500 | 9.2345 | 3.6778 | 0.5955 | 3.3212 | 3.4724 | 19.0 |
| 15.4004 | 44.34 | 28600 | 9.3130 | 3.9958 | 0.6892 | 3.5871 | 3.772 | 19.0 |
| 15.4004 | 44.5 | 28700 | 9.2984 | 4.1868 | 0.696 | 3.7194 | 3.9197 | 19.0 |
| 15.4004 | 44.65 | 28800 | 9.2722 | 3.8848 | 0.5914 | 3.5038 | 3.7022 | 19.0 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0
- Datasets 2.3.2
- Tokenizers 0.12.1
|