modelId | sha | lastModified | tags | pipeline_tag | private | author | config | id | downloads | likes | library_name | __index_level_0__ | readme |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bkwebb23/gpt2-untemplated-quests | 5fcff363d9e1e09de37905677d63c2dd2b7bce0c | 2022-04-13T16:22:01.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"license:mit"
] | text-generation | false | bkwebb23 | null | bkwebb23/gpt2-untemplated-quests | 4 | null | transformers | 19,300 | ---
license: mit
---
|
namanpun/exp1 | fcc2920fc0d80a117814b1edfdf1b7d9b5abc03f | 2022-04-14T20:24:46.000Z | [
"pytorch",
"tf",
"rust",
"gpt2",
"text-generation",
"transformers",
"license:mit"
] | text-generation | false | namanpun | null | namanpun/exp1 | 4 | null | transformers | 19,301 | ---
license: mit
---
Exp1
FoundryxBittensor |
QuickRead/PPO-policy_v3 | 448a0d960f60c5a6285763622c9c3f4cb8a8995d | 2022-04-22T14:18:57.000Z | [
"pytorch",
"pegasus",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | QuickRead | null | QuickRead/PPO-policy_v3 | 4 | null | transformers | 19,302 | Entry not found |
vinaykudari/t5-acled-ie | 55852710027178e79becdc7310aaaa58b68e1f54 | 2022-05-09T03:58:44.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | vinaykudari | null | vinaykudari/t5-acled-ie | 4 | null | transformers | 19,303 | Entry not found |
SiriusRen/my-rubbish-model2 | c6f1aaa3bb7bfe97208795de652613ad0d225181 | 2022-04-14T06:03:09.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | SiriusRen | null | SiriusRen/my-rubbish-model2 | 4 | null | transformers | 19,304 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: my-rubbish-model2
  results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# my-rubbish-model2
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
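For reference, these settings correspond roughly to the following `TrainingArguments` configuration (a minimal sketch; the output directory is a placeholder and dataset handling is omitted):
```python
from transformers import TrainingArguments

# Sketch of the reported hyperparameters; "./my-rubbish-model2" is a placeholder output path.
training_args = TrainingArguments(
    output_dir="./my-rubbish-model2",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=5,
)
```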
### Training results
### Framework versions
- Transformers 4.18.0.dev0
- Pytorch 1.10.0
- Datasets 2.0.1.dev0
- Tokenizers 0.11.6
|
eleldar/marian-finetuned-kde4-en-to-fr-accelerate | a96ddbe5068c6da345f1e04d556038994f0c025b | 2022-04-14T11:46:34.000Z | [
"pytorch",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | eleldar | null | eleldar/marian-finetuned-kde4-en-to-fr-accelerate | 4 | null | transformers | 19,305 | Entry not found |
aaya/distilbert-base-uncased-finetuned-ner | dfea8f6885987932ff229eb037de4898abac3594 | 2022-04-15T05:46:55.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"token-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | token-classification | false | aaya | null | aaya/distilbert-base-uncased-finetuned-ner | 4 | null | transformers | 19,306 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-ner
  results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-ner
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Framework versions
- Transformers 4.18.0
- Pytorch 1.10.0+cu111
- Datasets 2.1.0
- Tokenizers 0.12.1
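The card does not include a usage snippet; a minimal token-classification sketch with the `transformers` pipeline could look like the following (the example sentence is illustrative, and the entity labels depend on the unknown fine-tuning dataset):
```python
from transformers import pipeline

# Minimal NER sketch; entity labels come from the config of the fine-tuned checkpoint.
ner = pipeline(
    "token-classification",
    model="aaya/distilbert-base-uncased-finetuned-ner",
    aggregation_strategy="simple",
)
print(ner("Hugging Face is based in New York City."))
```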
|
jason9693/koelectra-small-v3-generator-apeach | 483da65df11b24f0d0934ad7a1f20a466832302f | 2022-04-16T14:43:51.000Z | [
"pytorch",
"electra",
"text-classification",
"ko",
"dataset:jason9693/APEACH",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | jason9693 | null | jason9693/koelectra-small-v3-generator-apeach | 4 | null | transformers | 19,307 | ---
tags: autotrain
language: ko
widget:
- text: "개념 집에다 ctrl+z헤놓고 왔나"
datasets:
- jason9693/APEACH
co2_eq_emissions: 0.01856239042036965
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 742522663
- CO2 Emissions (in grams): 0.01856239042036965
## Validation Metrics
- Loss: 0.4798508286476135
- Accuracy: 0.7740053050397878
- Precision: 0.7236622073578596
- Recall: 0.9006243496357961
- AUC: 0.8798210006261515
- F1: 0.8025034770514604
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/jason9693/autotrain-kor_hate_eval-742522663
```
Or the Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the fine-tuned classifier and its tokenizer (use_auth_token is only needed for private/gated access)
model = AutoModelForSequenceClassification.from_pretrained("jason9693/autotrain-kor_hate_eval-742522663", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("jason9693/autotrain-kor_hate_eval-742522663", use_auth_token=True)

# Tokenize the input and run a forward pass; outputs.logits holds the class scores
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
ketan-rmcf/hinglish-finetuned | ee0a3972f4231e06180cfeedd1509066e95a58dd | 2022-04-15T10:03:30.000Z | [
"pytorch",
"tf",
"tensorboard",
"bert",
"fill-mask",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | fill-mask | false | ketan-rmcf | null | ketan-rmcf/hinglish-finetuned | 4 | null | transformers | 19,308 | ---
tags:
- generated_from_trainer
model-index:
- name: hinglish-finetuned
  results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# hinglish-finetuned
This model is a fine-tuned version of [verloop/Hinglish-Bert](https://huggingface.co/verloop/Hinglish-Bert) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.0786
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 25
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.3784 | 1.0 | 80 | 3.0527 |
| 3.0398 | 2.0 | 160 | 2.8067 |
| 2.9133 | 3.0 | 240 | 2.7252 |
| 2.7872 | 4.0 | 320 | 2.5783 |
| 2.6205 | 5.0 | 400 | 2.5050 |
| 2.5979 | 6.0 | 480 | 2.4654 |
| 2.5655 | 7.0 | 560 | 2.4091 |
| 2.5412 | 8.0 | 640 | 2.3630 |
| 2.4479 | 9.0 | 720 | 2.3754 |
| 2.3724 | 10.0 | 800 | 2.2860 |
| 2.3842 | 11.0 | 880 | 2.2812 |
| 2.3411 | 12.0 | 960 | 2.2038 |
| 2.2617 | 13.0 | 1040 | 2.1887 |
| 2.3141 | 14.0 | 1120 | 2.1966 |
| 2.2115 | 15.0 | 1200 | 2.1248 |
| 2.2363 | 16.0 | 1280 | 2.1006 |
| 2.2191 | 17.0 | 1360 | 2.1248 |
| 2.1856 | 18.0 | 1440 | 2.0872 |
| 2.2009 | 19.0 | 1520 | 2.0299 |
| 2.2364 | 20.0 | 1600 | 2.0193 |
| 2.1785 | 21.0 | 1680 | 2.0227 |
| 2.1934 | 22.0 | 1760 | 2.0540 |
| 2.1479 | 23.0 | 1840 | 2.0381 |
| 2.0973 | 24.0 | 1920 | 1.9885 |
| 2.1376 | 25.0 | 2000 | 2.0142 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.10.0+cu111
- Datasets 2.1.0
- Tokenizers 0.12.1
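No usage example is provided; a minimal fill-mask sketch might look like this (the example sentence is illustrative; the mask token for this BERT-based model is `[MASK]`):
```python
from transformers import pipeline

# Minimal fill-mask sketch for the Hinglish model; the input sentence is illustrative only.
fill_mask = pipeline("fill-mask", model="ketan-rmcf/hinglish-finetuned")
for prediction in fill_mask("Aaj ka din bahut [MASK] hai."):
    print(prediction["token_str"], prediction["score"])
```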
|
dpazmino/finetuning-sentiment-model_duke_final_two | 0ba7fa775c77f30eeb746e6d6aac86047006c585 | 2022-04-15T17:30:54.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | dpazmino | null | dpazmino/finetuning-sentiment-model_duke_final_two | 4 | null | transformers | 19,309 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: finetuning-sentiment-model_duke_final_two
  results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetuning-sentiment-model_duke_final_two
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3381
- F1: 0.8801
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
### Framework versions
- Transformers 4.18.0
- Pytorch 1.10.0+cu111
- Datasets 2.1.0
- Tokenizers 0.12.1
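No usage snippet is included; a minimal text-classification sketch could be the following (the input text is illustrative and the label names come from the model's config, which is not documented here):
```python
from transformers import pipeline

# Minimal sentiment-classification sketch; label names depend on the model's config.
classifier = pipeline(
    "text-classification",
    model="dpazmino/finetuning-sentiment-model_duke_final_two",
)
print(classifier("The service was excellent and I would recommend it."))
```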
|
Raychanan/COVID_RandomOver | 778b4778567a4fd02cb7c5bcb7221e643bd4738b | 2022-04-15T01:24:46.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-classification | false | Raychanan | null | Raychanan/COVID_RandomOver | 4 | null | transformers | 19,310 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: results
  results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# results
This model is a fine-tuned version of [hfl/chinese-bert-wwm-ext](https://huggingface.co/hfl/chinese-bert-wwm-ext) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4235
- F1: 0.9546
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 1.1307 | 1.0 | 3268 | 0.9040 | 0.0 |
| 0.8795 | 2.0 | 6536 | 0.5532 | 0.9546 |
| 0.8183 | 3.0 | 9804 | 0.3641 | 0.9546 |
| 1.0074 | 4.0 | 13072 | 0.3998 | 0.9546 |
| 0.7947 | 5.0 | 16340 | 0.4235 | 0.9546 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.10.0+cu111
- Datasets 2.1.0
- Tokenizers 0.12.1
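No usage example is given; a minimal manual-inference sketch might look like this (the Chinese input sentence is illustrative; if the fine-tuned repository does not ship a tokenizer, the base model `hfl/chinese-bert-wwm-ext` could be used instead):
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Minimal inference sketch; the example text is illustrative and labels come from the model config.
tokenizer = AutoTokenizer.from_pretrained("Raychanan/COVID_RandomOver")
model = AutoModelForSequenceClassification.from_pretrained("Raychanan/COVID_RandomOver")

inputs = tokenizer("今天的疫情防控措施有了新的变化。", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predicted_id = logits.argmax(dim=-1).item()
print(model.config.id2label[predicted_id])
```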
|
SiriusRen/my-finetuned-bert | a85d6e557a6296b221946777ff2c859716f4b804 | 2022-04-22T05:50:05.000Z | [
"pytorch",
"bert",
"transformers"
] | null | false | SiriusRen | null | SiriusRen/my-finetuned-bert | 4 | null | transformers | 19,311 | Entry not found |
MartinoMensio/racism-models-raw-label-epoch-2 | 974a57ce2be7aa1a07535d32f543a31b4f7c9abf | 2022-05-04T16:04:18.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-raw-label-epoch-2 | 4 | null | transformers | 19,312 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `raw-label-epoch-2`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'raw-label-epoch-2'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.8982619643211365}, {'label': 'non-racist', 'score': 0.6703745126724243}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-regression-w-m-vote-epoch-3 | 7369f665cbe1a220c4ff2681e3994c3a6ec6c2c2 | 2022-05-04T16:21:40.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-regression-w-m-vote-epoch-3 | 4 | null | transformers | 19,313 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `regression-w-m-vote-epoch-3`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
from transformers.pipelines import TextClassificationPipeline
class TextRegressionPipeline(TextClassificationPipeline):
    """
    Class based on the TextClassificationPipeline from transformers.
    The difference is that instead of being based on a classifier, it is based on a regressor.
    You can specify the regression threshold when you call the pipeline or when you instantiate the pipeline.
    """

    def __init__(self, **kwargs):
        """
        Builds a new Pipeline based on regression.
        regression_threshold: Optional(float). If None, the pipeline will simply output the score. If set to a specific value, the output will be both the score and the label.
        """
        self.regression_threshold = kwargs.pop("regression_threshold", None)
        super().__init__(**kwargs)

    def __call__(self, *args, **kwargs):
        """
        You can also specify the regression threshold when you call the pipeline.
        regression_threshold: Optional(float). If None, the pipeline will simply output the score. If set to a specific value, the output will be both the score and the label.
        """
        self.regression_threshold_call = kwargs.pop("regression_threshold", None)
        result = super().__call__(*args, **kwargs)
        return result

    def postprocess(self, model_outputs, function_to_apply=None, return_all_scores=False):
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        scores = outputs
        score = scores[0]
        regression_threshold = self.regression_threshold
        # override the specific threshold if it is specified in the call
        if self.regression_threshold_call:
            regression_threshold = self.regression_threshold_call
        if regression_threshold:
            return {"label": 'racist' if score > regression_threshold else 'non-racist', "score": score}
        else:
            return {"score": score}
model_name = 'regression-w-m-vote-epoch-3'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = TextRegressionPipeline(model=model, tokenizer=tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
# just get the score of regression
print(pipe(texts))
# [{'score': 0.7393736}, {'score': 0.44301373}]
# or also specify a threshold to cut racist/non-racist
print(pipe(texts, regression_threshold=0.9))
# [{'label': 'non-racist', 'score': 0.7393736}, {'label': 'non-racist', 'score': 0.44301373}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-m-vote-strict-epoch-1 | 480f53d5f94477229e355e9a1bbbc2b404ee4e23 | 2022-05-04T16:07:46.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-m-vote-strict-epoch-1 | 4 | null | transformers | 19,314 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `m-vote-strict-epoch-1`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'm-vote-strict-epoch-1'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.6074065566062927}, {'label': 'non-racist', 'score': 0.8047575950622559}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-m-vote-strict-epoch-2 | 92723b2b7a5a06c76003e0ee994165fd4bd15424 | 2022-05-04T16:08:39.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-m-vote-strict-epoch-2 | 4 | null | transformers | 19,315 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `m-vote-strict-epoch-2`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'm-vote-strict-epoch-2'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.923829972743988}, {'label': 'non-racist', 'score': 0.8673009872436523}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-m-vote-strict-epoch-4 | 838aa00f66a948070db5e89a0cba3b6358a6f5c6 | 2022-05-04T16:10:41.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-m-vote-strict-epoch-4 | 4 | null | transformers | 19,316 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `m-vote-strict-epoch-4`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'm-vote-strict-epoch-4'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.9965864419937134}, {'label': 'racist', 'score': 0.6058831214904785}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-m-vote-nonstrict-epoch-1 | b147380a037e6efda7c9c283f843b385975dbea1 | 2022-05-04T16:11:39.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-m-vote-nonstrict-epoch-1 | 4 | null | transformers | 19,317 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `m-vote-nonstrict-epoch-1`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'm-vote-nonstrict-epoch-1'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.9265261888504028}, {'label': 'non-racist', 'score': 0.802951991558075}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-m-vote-nonstrict-epoch-2 | e410d00797a4982bc52eaf4d9cfe87523114e9dd | 2022-05-04T16:12:34.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-m-vote-nonstrict-epoch-2 | 4 | null | transformers | 19,318 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `m-vote-nonstrict-epoch-2`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'm-vote-nonstrict-epoch-2'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.8650100827217102}, {'label': 'non-racist', 'score': 0.9674995541572571}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-m-vote-nonstrict-epoch-3 | bda4da5024c309fbdfa4fa5b27b8d1a8b8182e4c | 2022-05-04T16:13:17.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-m-vote-nonstrict-epoch-3 | 4 | null | transformers | 19,319 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `m-vote-nonstrict-epoch-3`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'm-vote-nonstrict-epoch-3'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.9642159342765808}, {'label': 'non-racist', 'score': 0.9484726786613464}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-m-vote-nonstrict-epoch-4 | 483269931d7746329090224fc787b36beb452cea | 2022-05-04T16:14:06.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-m-vote-nonstrict-epoch-4 | 4 | null | transformers | 19,320 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `m-vote-nonstrict-epoch-4`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'm-vote-nonstrict-epoch-4'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.9791656136512756}, {'label': 'non-racist', 'score': 0.996966540813446}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1 | d8e6c81cd6bc1a6a12b5453b6d43e24d7a6658a7 | 2022-05-04T16:27:31.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1 | 4 | null | transformers | 19,321 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022).
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning. The result is a set of 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `w-m-vote-nonstrict-epoch-1`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'w-m-vote-nonstrict-epoch-1'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.8460916876792908}, {'label': 'non-racist', 'score': 0.9714874029159546}]
```
For more details, see https://github.com/preyero/neatclass22
|
MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3 | ed894e55d2bc73805b3a99b9310c7e4e267fcd7e | 2022-05-04T16:28:53.000Z | [
"pytorch",
"bert",
"text-classification",
"es",
"transformers",
"license:mit"
] | text-classification | false | MartinoMensio | null | MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3 | 4 | null | transformers | 19,322 | ---
language: es
license: mit
widget:
- text: "y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!"
---
### Description
This model is a fine-tuned version of [BETO (Spanish BERT)](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) that has been trained on the *Datathon Against Racism* dataset (2022).
We performed several experiments that will be described in the upcoming paper "Estimating Ground Truth in a Low-labelled Data Regime: A Study of Racism Detection in Spanish" (NEATClasS 2022)
We applied 6 different methods of ground-truth estimation, and for each one we performed 4 epochs of fine-tuning, resulting in 24 models:
| method | epoch 1 | epoch 2 | epoch 3 | epoch 4 |
|--- |--- |--- |--- |--- |
| raw-label | [raw-label-epoch-1](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-1) | [raw-label-epoch-2](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-2) | [raw-label-epoch-3](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-3) | [raw-label-epoch-4](https://huggingface.co/MartinoMensio/racism-models-raw-label-epoch-4) |
| m-vote-strict | [m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-1) | [m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-2) | [m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-3) | [m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-strict-epoch-4) |
| m-vote-nonstrict | [m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-1) | [m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-2) | [m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-3) | [m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-m-vote-nonstrict-epoch-4) |
| regression-w-m-vote | [regression-w-m-vote-epoch-1](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-1) | [regression-w-m-vote-epoch-2](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-2) | [regression-w-m-vote-epoch-3](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-3) | [regression-w-m-vote-epoch-4](https://huggingface.co/MartinoMensio/racism-models-regression-w-m-vote-epoch-4) |
| w-m-vote-strict | [w-m-vote-strict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-1) | [w-m-vote-strict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-2) | [w-m-vote-strict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-3) | [w-m-vote-strict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-strict-epoch-4) |
| w-m-vote-nonstrict | [w-m-vote-nonstrict-epoch-1](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-1) | [w-m-vote-nonstrict-epoch-2](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-2) | [w-m-vote-nonstrict-epoch-3](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-3) | [w-m-vote-nonstrict-epoch-4](https://huggingface.co/MartinoMensio/racism-models-w-m-vote-nonstrict-epoch-4) |
This model is `w-m-vote-nonstrict-epoch-3`
### Usage
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
model_name = 'w-m-vote-nonstrict-epoch-3'
tokenizer = AutoTokenizer.from_pretrained("dccuchile/bert-base-spanish-wwm-uncased")
full_model_path = f'MartinoMensio/racism-models-{model_name}'
model = AutoModelForSequenceClassification.from_pretrained(full_model_path)
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer)
texts = [
'y porqué es lo que hay que hacer con los menas y con los adultos también!!!! NO a los inmigrantes ilegales!!!!',
'Es que los judíos controlan el mundo'
]
print(pipe(texts))
# [{'label': 'racist', 'score': 0.9937393665313721}, {'label': 'non-racist', 'score': 0.9902436137199402}]
```
For more details, see https://github.com/preyero/neatclass22
|
Chikashi/t5-small-finetuned-cnndm3-wikihow3 | 9ec5836da1363ab023bf9d066921ad3c8b35627e | 2022-04-16T01:42:47.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:wikihow",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | Chikashi | null | Chikashi/t5-small-finetuned-cnndm3-wikihow3 | 4 | null | transformers | 19,323 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- wikihow
metrics:
- rouge
model-index:
- name: t5-small-finetuned-cnndm3-wikihow3
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: wikihow
type: wikihow
args: all
metrics:
- name: Rouge1
type: rouge
value: 27.2654
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-cnndm3-wikihow3
This model is a fine-tuned version of [Chikashi/t5-small-finetuned-cnndm3-wikihow2](https://huggingface.co/Chikashi/t5-small-finetuned-cnndm3-wikihow2) on the wikihow dataset.
It achieves the following results on the evaluation set:
- Loss: 2.3138
- Rouge1: 27.2654
- Rouge2: 10.5461
- Rougel: 23.2451
- Rougelsum: 26.6151
- Gen Len: 18.5263
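## How to use
A minimal inference sketch, assuming the standard T5 text2text interface; the example article and the generation settings (`max_length`, `num_beams`) are illustrative choices, not values taken from the training run.
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "Chikashi/t5-small-finetuned-cnndm3-wikihow3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Illustrative how-to style input; any article text works.
article = (
    "Wash the car with soapy water and a soft sponge, rinse it thoroughly, "
    "then dry it with a microfiber cloth before applying a thin coat of wax."
)
# T5 summarization checkpoints usually expect the "summarize: " task prefix.
inputs = tokenizer("summarize: " + article, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=60, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```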
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
| 2.5019 | 1.0 | 39313 | 2.3138 | 27.2654 | 10.5461 | 23.2451 | 26.6151 | 18.5263 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.10.0+cu111
- Datasets 2.1.0
- Tokenizers 0.12.1
|
dennishe97/longformer-code-relatedness | 804849619668d3e5e481af5f6f90db0fdf6ebef8 | 2022-04-16T05:25:18.000Z | [
"pytorch",
"longformer",
"transformers"
] | null | false | dennishe97 | null | dennishe97/longformer-code-relatedness | 4 | null | transformers | 19,324 | Entry not found |
jason9693/soongsil-bert-base-apeach | 18cc9bd7812c15bc2befe753d1327aad4d216f45 | 2022-04-16T14:20:38.000Z | [
"pytorch",
"roberta",
"text-classification",
"ko",
"dataset:jason9693/APEACH",
"transformers"
] | text-classification | false | jason9693 | null | jason9693/soongsil-bert-base-apeach | 4 | null | transformers | 19,325 | ---
language: ko
widget:
- text: "응 어쩔티비~~"
datasets:
- jason9693/APEACH
--- |
crcb/goemos | de9cf308da9a92fc464231b01439bcd31d195554 | 2022-04-16T15:16:07.000Z | [
"pytorch",
"bert",
"text-classification",
"en",
"dataset:crcb/autotrain-data-go_emo",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | crcb | null | crcb/goemos | 4 | null | transformers | 19,326 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- crcb/autotrain-data-go_emo
co2_eq_emissions: 31.11935827749309
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 748922872
- CO2 Emissions (in grams): 31.11935827749309
## Validation Metrics
- Loss: 0.17039568722248077
- Accuracy: 0.93625
- Macro F1: 0.9075787460059076
- Micro F1: 0.93625
- Weighted F1: 0.9371621543264445
- Macro Precision: 0.8945117620407296
- Micro Precision: 0.93625
- Weighted Precision: 0.9433589433926076
- Macro Recall: 0.9323604226458176
- Micro Recall: 0.93625
- Weighted Recall: 0.93625
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crcb/autotrain-go_emo-748922872
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crcb/autotrain-go_emo-748922872", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crcb/autotrain-go_emo-748922872", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
crcb/emo_nojoylove | f83f7c99e05f0176efdf02b7af6c0785df71f458 | 2022-04-17T14:19:31.000Z | [
"pytorch",
"bert",
"text-classification",
"en",
"dataset:crcb/autotrain-data-emo_carer_nojoylove",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | crcb | null | crcb/emo_nojoylove | 4 | null | transformers | 19,327 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- crcb/autotrain-data-emo_carer_nojoylove
co2_eq_emissions: 12.236769332727217
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 751422966
- CO2 Emissions (in grams): 12.236769332727217
## Validation Metrics
- Loss: 0.1358409821987152
- Accuracy: 0.9397905759162304
- Macro F1: 0.9096049124431982
- Micro F1: 0.9397905759162304
- Weighted F1: 0.9395954853807672
- Macro Precision: 0.919807346649452
- Micro Precision: 0.9397905759162304
- Weighted Precision: 0.9407259082357824
- Macro Recall: 0.9024000547645126
- Micro Recall: 0.9397905759162304
- Weighted Recall: 0.9397905759162304
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crcb/autotrain-emo_carer_nojoylove-751422966
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crcb/autotrain-emo_carer_nojoylove-751422966", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crcb/autotrain-emo_carer_nojoylove-751422966", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
crcb/carer_2 | 1799c7136c3a9a04e111fa1f13be0121d404d0df | 2022-04-17T14:14:39.000Z | [
"pytorch",
"roberta",
"text-classification",
"en",
"dataset:crcb/autotrain-data-emo_carer_nojoylove",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | crcb | null | crcb/carer_2 | 4 | null | transformers | 19,328 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- crcb/autotrain-data-emo_carer_nojoylove
co2_eq_emissions: 2.370895196595982
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 751422974
- CO2 Emissions (in grams): 2.370895196595982
## Validation Metrics
- Loss: 0.15362708270549774
- Accuracy: 0.9345549738219895
- Macro F1: 0.9016011681330569
- Micro F1: 0.9345549738219895
- Weighted F1: 0.9345413976263288
- Macro Precision: 0.9032333514618506
- Micro Precision: 0.9345549738219895
- Weighted Precision: 0.9345804677958041
- Macro Recall: 0.9001021129974442
- Micro Recall: 0.9345549738219895
- Weighted Recall: 0.9345549738219895
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crcb/autotrain-emo_carer_nojoylove-751422974
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crcb/autotrain-emo_carer_nojoylove-751422974", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crcb/autotrain-emo_carer_nojoylove-751422974", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
huggingtweets/crowsunflower-holyhorror8-witheredstrings | 10602e91f50629bb2ca0553c0d2cbd83daaec3dc | 2022-04-17T18:28:49.000Z | [
"pytorch",
"gpt2",
"text-generation",
"en",
"transformers",
"huggingtweets"
] | text-generation | false | huggingtweets | null | huggingtweets/crowsunflower-holyhorror8-witheredstrings | 4 | null | transformers | 19,329 | ---
language: en
thumbnail: http://www.huggingtweets.com/crowsunflower-holyhorror8-witheredstrings/1650220124956/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1515513843216171009/zT6m-Miq_400x400.jpg')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1510441725415899139/16Ro5tD5_400x400.jpg')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1511427151102287872/Onql0JIa_400x400.jpg')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">VacuumF & Jude obscura 🌒 & The Mad Puppet/Prophet</div>
<div style="text-align: center; font-size: 14px;">@crowsunflower-holyhorror8-witheredstrings</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from VacuumF & Jude obscura 🌒 & The Mad Puppet/Prophet.
| Data | VacuumF | Jude obscura 🌒 | The Mad Puppet/Prophet |
| --- | --- | --- | --- |
| Tweets downloaded | 454 | 3228 | 3243 |
| Retweets | 2 | 829 | 134 |
| Short tweets | 38 | 742 | 1275 |
| Tweets kept | 414 | 1657 | 1834 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1fsr8bm1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @crowsunflower-holyhorror8-witheredstrings's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/dgpcknqj) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/dgpcknqj/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation',
model='huggingtweets/crowsunflower-holyhorror8-witheredstrings')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
crcb/hateval_re | 83fbdc212c324847bef71dd40802cefa9ca3ab49 | 2022-04-18T01:35:05.000Z | [
"pytorch",
"bert",
"text-classification",
"en",
"dataset:crcb/autotrain-data-hate_speech",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | crcb | null | crcb/hateval_re | 4 | null | transformers | 19,330 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- crcb/autotrain-data-hate_speech
co2_eq_emissions: 5.301132895184483
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 752122994
- CO2 Emissions (in grams): 5.301132895184483
## Validation Metrics
- Loss: 0.7107211351394653
- Accuracy: 0.7529411764705882
- Precision: 0.7502287282708143
- Recall: 0.9177392277560157
- AUC: 0.8358316393336287
- F1: 0.8255726151522779
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crcb/autotrain-hate_speech-752122994
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crcb/autotrain-hate_speech-752122994", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crcb/autotrain-hate_speech-752122994", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
ardallie/xlm-roberta-base-finetuned-panx-de | 2c43db15d16d8a50868ade4a023a6d63a34d30ee | 2022-04-18T02:20:27.000Z | [
"pytorch",
"tensorboard",
"xlm-roberta",
"token-classification",
"dataset:xtreme",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] | token-classification | false | ardallie | null | ardallie/xlm-roberta-base-finetuned-panx-de | 4 | null | transformers | 19,331 | ---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
args: PAN-X.de
metrics:
- name: F1
type: f1
value: 0.863114847211178
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-de
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1365
- F1: 0.8631
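## How to use
A minimal inference sketch, assuming the fine-tuned checkpoint keeps the PAN-X (WikiANN) label set in its config; the German example sentence is purely illustrative.
```python
from transformers import pipeline

# aggregation_strategy="simple" merges sub-word pieces into whole entity spans
ner = pipeline(
    "token-classification",
    model="ardallie/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",
)
# Illustrative sentence; replace with your own German text.
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```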
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2513 | 1.0 | 525 | 0.1650 | 0.8206 |
| 0.1301 | 2.0 | 1050 | 0.1455 | 0.8454 |
| 0.08 | 3.0 | 1575 | 0.1365 | 0.8631 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.2
- Datasets 1.16.1
- Tokenizers 0.10.3
|
supriyaraj47/roberta-base-nli | 1661abd8133692c8b40307767ce58d57f8c151cb | 2022-04-18T03:31:55.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | false | supriyaraj47 | null | supriyaraj47/roberta-base-nli | 4 | null | transformers | 19,332 | Entry not found |
Jatin-WIAI/marathi_relevance_clf | d15522e51540aa108fc4f768cf990baad0353bf8 | 2022-04-18T11:40:27.000Z | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
] | text-classification | false | Jatin-WIAI | null | Jatin-WIAI/marathi_relevance_clf | 4 | null | transformers | 19,333 | Entry not found |
SimoC/distilbert-base-uncased-finetuned-emotion | 7d29435c7beda8cb9a0e58011934236dceb4f77f | 2022-04-18T12:57:57.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers"
] | text-classification | false | SimoC | null | SimoC/distilbert-base-uncased-finetuned-emotion | 4 | null | transformers | 19,334 | Entry not found |
crcb/hs_dvs | dbb3041e949c7e0bc502a1ad7caf9d36ce719749 | 2022-04-18T13:43:00.000Z | [
"pytorch",
"distilbert",
"text-classification",
"en",
"dataset:crcb/autotrain-data-dvs",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | crcb | null | crcb/hs_dvs | 4 | null | transformers | 19,335 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- crcb/autotrain-data-dvs
co2_eq_emissions: 5.1746636998598445
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 753223051
- CO2 Emissions (in grams): 5.1746636998598445
## Validation Metrics
- Loss: 0.14639143645763397
- Accuracy: 0.9493645350010087
- Precision: 0.5460992907801419
- Recall: 0.2916666666666667
- AUC: 0.8843542768404266
- F1: 0.3802469135802469
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crcb/autotrain-dvs-753223051
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crcb/autotrain-dvs-753223051", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crcb/autotrain-dvs-753223051", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
crcb/imp_hatred_f | 415d7a50b368d20e0dfb9ccedc8aeae0263e0562 | 2022-04-18T14:11:31.000Z | [
"pytorch",
"bert",
"text-classification",
"en",
"dataset:crcb/autotrain-data-imp_hs",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | crcb | null | crcb/imp_hatred_f | 4 | null | transformers | 19,336 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- crcb/autotrain-data-imp_hs
co2_eq_emissions: 0.05286505617263864
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 753423076
- CO2 Emissions (in grams): 0.05286505617263864
## Validation Metrics
- Loss: 0.539419412612915
- Accuracy: 0.7616387337057728
- Macro F1: 0.6428050387135232
- Micro F1: 0.761638733705773
- Weighted F1: 0.7592341595725172
- Macro Precision: 0.6606534010647378
- Micro Precision: 0.7616387337057728
- Weighted Precision: 0.7575825822976101
- Macro Recall: 0.6293404928847536
- Micro Recall: 0.7616387337057728
- Weighted Recall: 0.7616387337057728
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crcb/autotrain-imp_hs-753423076
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crcb/autotrain-imp_hs-753423076", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crcb/autotrain-imp_hs-753423076", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
wangmiaobeng/chinese-bert-wwm-finetuned-jd | b9681136544348bc47558482bbfe97e815c74895 | 2022-04-18T15:17:37.000Z | [
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | fill-mask | false | wangmiaobeng | null | wangmiaobeng/chinese-bert-wwm-finetuned-jd | 4 | null | transformers | 19,337 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: chinese-bert-wwm-finetuned-jd
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# chinese-bert-wwm-finetuned-jd
This model is a fine-tuned version of [hfl/chinese-bert-wwm](https://huggingface.co/hfl/chinese-bert-wwm) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.9340
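## How to use
A minimal fill-mask sketch; the Chinese example sentence is an illustrative review-style input and is not taken from the (unknown) fine-tuning data.
```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="wangmiaobeng/chinese-bert-wwm-finetuned-jd")
# BERT-style checkpoints use the [MASK] placeholder token; the sentence below is illustrative.
print(fill_mask("这个手机的质量真的很[MASK]。"))
```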
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.1648 | 1.0 | 5 | 2.9366 |
| 3.0095 | 2.0 | 10 | 2.9487 |
| 3.0698 | 3.0 | 15 | 2.9177 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu102
- Datasets 2.1.0
- Tokenizers 0.12.1
|
TracyWang/t5-small-finetuned-xsum | e1afac4b586c6e1c7e3796d9f871a09e109aa407 | 2022-04-19T07:50:07.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:xsum",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | TracyWang | null | TracyWang/t5-small-finetuned-xsum | 4 | null | transformers | 19,338 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- xsum
model-index:
- name: t5-small-finetuned-xsum
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-xsum
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the xsum dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Framework versions
- Transformers 4.18.0
- Pytorch 1.10.0+cu111
- Datasets 2.1.0
- Tokenizers 0.12.1
|
waboucay/camembert-base-finetuned-nli-repnum_wl-rua_wl | a34a4742c580e7c608ce0ae9b014dbcb97d3de63 | 2022-04-21T15:10:51.000Z | [
"pytorch",
"camembert",
"text-classification",
"fr",
"transformers",
"nli"
] | text-classification | false | waboucay | null | waboucay/camembert-base-finetuned-nli-repnum_wl-rua_wl | 4 | null | transformers | 19,339 | ---
language:
- fr
tags:
- nli
metrics:
- f1
---
## Eval results
We obtain the following results on ```validation``` and ```test``` sets:
| Set | F1<sub>micro</sub> | F1<sub>macro</sub> |
|------------|--------------------|--------------------|
| validation | 73.5 | 73.5 |
| test | 75.5 | 75.5 | |
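## How to use
A minimal inference sketch for the sentence-pair setup, assuming the premise and hypothesis are encoded as a pair; the meaning of each output column comes from the checkpoint's `id2label` mapping, which is not documented here.
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

model_name = "waboucay/camembert-base-finetuned-nli-repnum_wl-rua_wl"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Illustrative premise/hypothesis pair
premise = "Le projet de loi a été adopté par l'Assemblée nationale."
hypothesis = "Le texte a été voté."
inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)  # interpret the columns with model.config.id2label
```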
waboucay/camembert-base-finetuned-nli-xnli_fr-repnum_wl-rua_wl | b27130538c4c22865a9d83a05dbc2441c94cfa2a | 2022-04-21T15:15:18.000Z | [
"pytorch",
"camembert",
"text-classification",
"fr",
"transformers",
"nli"
] | text-classification | false | waboucay | null | waboucay/camembert-base-finetuned-nli-xnli_fr-repnum_wl-rua_wl | 4 | null | transformers | 19,340 | ---
language:
- fr
tags:
- nli
metrics:
- f1
---
## Eval results
We obtain the following results on ```validation``` and ```test``` sets:
| Set | F1<sub>micro</sub> | F1<sub>macro</sub> |
|------------|--------------------|--------------------|
| validation | 83.1 | 82.2 |
| test | 86.0 | 85.0 | |
intellisr/autotrain-twitterMbti-758223271 | 38d86c7fcc0ea35a3d57bafa40c3345352dc6456 | 2022-04-19T14:18:50.000Z | [
"pytorch",
"bert",
"text-classification",
"en",
"dataset:intellisr/autotrain-data-twitterMbti",
"transformers",
"autotrain",
"co2_eq_emissions"
] | text-classification | false | intellisr | null | intellisr/autotrain-twitterMbti-758223271 | 4 | null | transformers | 19,341 | ---
tags: autotrain
language: en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- intellisr/autotrain-data-twitterMbti
co2_eq_emissions: 0.3313142450338848
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 758223271
- CO2 Emissions (in grams): 0.3313142450338848
## Validation Metrics
- Loss: 1.2496932744979858
- Accuracy: 0.6438828259620908
- Macro F1: 0.5757131072506373
- Micro F1: 0.6438828259620908
- Weighted F1: 0.6401462906378685
- Macro Precision: 0.6279826743318115
- Micro Precision: 0.6438828259620908
- Weighted Precision: 0.6479595607607238
- Macro Recall: 0.5436771609966322
- Micro Recall: 0.6438828259620908
- Weighted Recall: 0.6438828259620908
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/intellisr/autotrain-twitterMbti-758223271
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("intellisr/autotrain-twitterMbti-758223271", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("intellisr/autotrain-twitterMbti-758223271", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
hanxiong/distilbert-base-uncased-finetuned-cola | 5700dc1436a9f1474c7394260c657e3397dabfe4 | 2022-04-20T02:10:02.000Z | [
"pytorch",
"distilbert",
"text-classification",
"transformers"
] | text-classification | false | hanxiong | null | hanxiong/distilbert-base-uncased-finetuned-cola | 4 | null | transformers | 19,342 | Entry not found |
irmgnrtop/roberta-finetuned-error-detection | bd0f9fb4b836c001108786f5047b1e6728fb7cb3 | 2022-04-19T20:01:06.000Z | [
"pytorch",
"tensorboard",
"roberta",
"token-classification",
"transformers",
"autotrain_compatible"
] | token-classification | false | irmgnrtop | null | irmgnrtop/roberta-finetuned-error-detection | 4 | null | transformers | 19,343 | Entry not found |
GPL/dbpedia-entity-msmarco-distilbert-gpl | 98a7c075f0ee63f12d63e3bfdf311858dec34603 | 2022-04-19T15:13:29.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/dbpedia-entity-msmarco-distilbert-gpl | 4 | null | sentence-transformers | 19,344 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# GPL/dbpedia-entity-msmarco-distilbert-gpl
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('GPL/dbpedia-entity-msmarco-distilbert-gpl')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('GPL/dbpedia-entity-msmarco-distilbert-gpl')
model = AutoModel.from_pretrained('GPL/dbpedia-entity-msmarco-distilbert-gpl')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=GPL/dbpedia-entity-msmarco-distilbert-gpl)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
GPL/quora-msmarco-distilbert-gpl | 308a1cab477f4a04d3fdc28a8de50949e54ef784 | 2022-04-19T15:15:18.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/quora-msmarco-distilbert-gpl | 4 | null | sentence-transformers | 19,345 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# GPL/quora-msmarco-distilbert-gpl
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('GPL/quora-msmarco-distilbert-gpl')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('GPL/quora-msmarco-distilbert-gpl')
model = AutoModel.from_pretrained('GPL/quora-msmarco-distilbert-gpl')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=GPL/quora-msmarco-distilbert-gpl)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
GPL/nfcorpus-tsdae-msmarco-distilbert-gpl | c2ea2bd85b18f26e2e80ed697087e714df36a5f7 | 2022-04-19T15:25:07.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/nfcorpus-tsdae-msmarco-distilbert-gpl | 4 | null | sentence-transformers | 19,346 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# GPL/nfcorpus-tsdae-msmarco-distilbert-gpl
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('GPL/nfcorpus-tsdae-msmarco-distilbert-gpl')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('GPL/nfcorpus-tsdae-msmarco-distilbert-gpl')
model = AutoModel.from_pretrained('GPL/nfcorpus-tsdae-msmarco-distilbert-gpl')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=GPL/nfcorpus-tsdae-msmarco-distilbert-gpl)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
GPL/scidocs-tsdae-msmarco-distilbert-gpl | 026375e33eb4f149a94e8b5adcbb642e87eaaec9 | 2022-04-19T15:27:32.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"sentence-transformers",
"sentence-similarity",
"transformers"
] | sentence-similarity | false | GPL | null | GPL/scidocs-tsdae-msmarco-distilbert-gpl | 4 | null | sentence-transformers | 19,347 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# GPL/scidocs-tsdae-msmarco-distilbert-gpl
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('GPL/scidocs-tsdae-msmarco-distilbert-gpl')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('GPL/scidocs-tsdae-msmarco-distilbert-gpl')
model = AutoModel.from_pretrained('GPL/scidocs-tsdae-msmarco-distilbert-gpl')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=GPL/scidocs-tsdae-msmarco-distilbert-gpl)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 140000 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.SequentialSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`gpl.toolkit.loss.MarginDistillationLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 140000,
"warmup_steps": 1000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
nielsr/segformer-trainer-test | 92f73207233eea18d6c7a44dd3fbee88e86b7c52 | 2022-04-19T20:19:47.000Z | [
"pytorch",
"tensorboard",
"segformer",
"transformers",
"image-segmentation",
"vision",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | image-segmentation | false | nielsr | null | nielsr/segformer-trainer-test | 4 | null | transformers | 19,348 | ---
license: apache-2.0
tags:
- image-segmentation
- vision
- generated_from_trainer
widget:
- src: https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/admin-tobias/439f6843-80c5-47ce-9b17-0b2a1d54dbeb.jpg
example_title: Brugge
model-index:
- name: segformer-trainer-test
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# segformer-trainer-test
This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the segments/sidewalk-semantic dataset.
It achieves the following results on the evaluation set:
- Loss: 1.3886
- Mean Iou: 0.1391
- Mean Accuracy: 0.1905
- Overall Accuracy: 0.7192
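## How to use
A minimal inference sketch; it reuses the widget image above and assumes a SegFormer preprocessor config can be loaded (if this repo does not ship one, the `nvidia/mit-b0` feature extractor is a reasonable stand-in, as used below).
```python
from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation
from PIL import Image
import requests
import torch

checkpoint = "nielsr/segformer-trainer-test"
# Assumption: fall back to the base model's feature extractor for preprocessing.
feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/mit-b0")
model = SegformerForSemanticSegmentation.from_pretrained(checkpoint)

# Same street-scene image as the widget example above
url = "https://segmentsai-prod.s3.eu-west-2.amazonaws.com/assets/admin-tobias/439f6843-80c5-47ce-9b17-0b2a1d54dbeb.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (batch, num_labels, height/4, width/4)

# per-pixel class index at the reduced resolution
predicted = logits.argmax(dim=1)
```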
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10.0
### Training results
### Framework versions
- Transformers 4.19.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
|
GPL/trec-news-tsdae-msmarco-distilbert-margin-mse | 09eda6d9fbe8657b207317645eaf97200815771c | 2022-04-19T16:46:27.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | GPL | null | GPL/trec-news-tsdae-msmarco-distilbert-margin-mse | 4 | null | transformers | 19,349 | Entry not found |
GPL/trec-covid-tsdae-msmarco-distilbert-margin-mse | db6514c42b81e0baf9e4a5cab482db4d79caa1a6 | 2022-04-19T16:47:22.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | GPL | null | GPL/trec-covid-tsdae-msmarco-distilbert-margin-mse | 4 | null | transformers | 19,350 | Entry not found |
Intel/bert-base-uncased-finetuned-swag-int8-static | aba9433bbeb71e21ec4a2644abb776a262013f61 | 2022-06-10T02:42:12.000Z | [
"pytorch",
"bert",
"multiple-choice",
"en",
"dataset:swag",
"transformers",
"int8",
"Intel® Neural Compressor",
"PostTrainingStatic",
"license:apache-2.0",
"model-index"
] | multiple-choice | false | Intel | null | Intel/bert-base-uncased-finetuned-swag-int8-static | 4 | null | transformers | 19,351 | ---
language:
- en
license: apache-2.0
tags:
- multiple-choice
- int8
- Intel® Neural Compressor
- PostTrainingStatic
datasets:
- swag
metrics:
- accuracy
model-index:
- name: bert-base-uncased-finetuned-swag-int8-static
results:
- task:
name: Multiple-choice
type: multiple-choice
dataset:
name: Swag
type: swag
metrics:
- name: Accuracy
type: accuracy
value: 0.7838148474693298
---
# INT8 bert-base-uncased-finetuned-swag
### Post-training static quantization
This is an INT8 PyTorch model quantized with [Intel® Neural Compressor](https://github.com/intel/neural-compressor).
The original fp32 model comes from the fine-tuned model [thyagosme/bert-base-uncased-finetuned-swag](https://huggingface.co/thyagosme/bert-base-uncased-finetuned-swag).
The calibration dataloader is the train dataloader. The default calibration sampling size of 100 is not exactly divisible by the batch size of 8, so the real sampling size is 104.
The linear modules **bert.encoder.layer.2.output.dense, bert.encoder.layer.5.intermediate.dense, bert.encoder.layer.9.output.dense, bert.encoder.layer.10.output.dense** fall back to fp32 to keep the relative accuracy loss within 1%.
### Test result
| |INT8|FP32|
|---|:---:|:---:|
| **Accuracy (eval-accuracy)** |0.7838|0.7915|
| **Model size (MB)** |133|418|
### Load with Intel® Neural Compressor:
```python
from neural_compressor.utils.load_huggingface import OptimizedModel
int8_model = OptimizedModel.from_pretrained(
'Intel/bert-base-uncased-finetuned-swag-int8-static',
)
```
|
patrickvonplaten/data2vec-audio-base-960h-4-gram | 82f08be70f49eaa7f360f6d0714f9c3509e61a0e | 2022-05-24T11:09:21.000Z | [
"pytorch",
"data2vec-audio",
"automatic-speech-recognition",
"en",
"dataset:librispeech_asr",
"arxiv:2202.03555",
"transformers",
"speech",
"hf-asr-leaderboard",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | patrickvonplaten | null | patrickvonplaten/data2vec-audio-base-960h-4-gram | 4 | null | transformers | 19,352 | ---
language: en
datasets:
- librispeech_asr
tags:
- speech
- hf-asr-leaderboard
license: apache-2.0
widget:
- example_title: Librispeech sample 1
src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
- example_title: Librispeech sample 2
src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
model-index:
- name: data2vec-audio-base-960h
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: LibriSpeech (clean)
type: librispeech_asr
config: clean
split: test
args:
language: en
metrics:
- name: Test WER
type: wer
value: 2.77
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: LibriSpeech (other)
type: librispeech_asr
config: other
split: test
args:
language: en
metrics:
- name: Test WER
type: wer
value: 7.08
---
# Data2Vec-Audio-Base-960h
[Facebook's Data2Vec](https://ai.facebook.com/research/data2vec-a-general-framework-for-self-supervised-learning-in-speech-vision-and-language/)
The base model was pretrained and fine-tuned on 960 hours of Librispeech 16kHz sampled speech audio. When using the model,
make sure that your speech input is also sampled at 16kHz.
[Paper](https://arxiv.org/abs/2202.03555)
Authors: Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli
**Abstract**
While the general idea of self-supervised learning is identical across modalities, the actual algorithms and objectives differ widely because they were developed with a single modality in mind. To get us closer to general self-supervised learning, we present data2vec, a framework that uses the same learning method for either speech, NLP or computer vision. The core idea is to predict latent representations of the full input data based on a masked view of the input in a self-distillation setup using a standard Transformer architecture. Instead of predicting modality-specific targets such as words, visual tokens or units of human speech which are local in nature, data2vec predicts contextualized latent representations that contain information from the entire input. Experiments on the major benchmarks of speech recognition, image classification, and natural language understanding demonstrate a new state of the art or competitive performance to predominant approaches.
The original model can be found under https://github.com/pytorch/fairseq/tree/main/examples/data2vec .
# Pre-Training method

For more information, please take a look at the [official paper](https://arxiv.org/abs/2202.03555).
# Usage
To transcribe audio files the model can be used as a standalone acoustic model as follows:
```python
from transformers import Wav2Vec2Processor, Data2VecAudioForCTC
from datasets import load_dataset
import torch
# load model and processor
processor = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")
model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h")
# load dummy dataset and read soundfiles
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
# preprocess the audio
input_values = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt", padding="longest").input_values  # Batch size 1
# retrieve logits
logits = model(input_values).logits
# take argmax and decode
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
```
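Because this checkpoint is the 4-gram variant, the greedy `argmax` decoding above can be replaced by beam-search decoding with an n-gram language model. The sketch below assumes the repository ships the `pyctcdecode`/KenLM decoder files expected by `Wav2Vec2ProcessorWithLM` (install `pyctcdecode` and `kenlm` first):
```python
from transformers import Wav2Vec2ProcessorWithLM, Data2VecAudioForCTC
from datasets import load_dataset
import torch

# assumes this repository contains the 4-gram LM decoder files
processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/data2vec-audio-base-960h-4-gram")
model = Data2VecAudioForCTC.from_pretrained("patrickvonplaten/data2vec-audio-base-960h-4-gram")

ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
input_values = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt").input_values

with torch.no_grad():
    logits = model(input_values).logits

# beam-search decoding with the 4-gram LM instead of plain argmax
transcription = processor.batch_decode(logits.numpy()).text
```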
## Evaluation
This code snippet shows how to evaluate **facebook/data2vec-audio-base-960h** on LibriSpeech's "clean" test data (swap the dataset config to "other" for the other split).
```python
from transformers import Wav2Vec2Processor, Data2VecAudioForCTC
from datasets import load_dataset
import torch
from jiwer import wer
# load model and processor
processor = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")
model = Data2VecAudioForCTC.from_pretrained("facebook/data2vec-audio-base-960h").to("cuda")
librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
def map_to_pred(batch):
    # with batched=True, batch["audio"] is a list of audio dicts
    audio = [sample["array"] for sample in batch["audio"]]
    input_values = processor(audio, sampling_rate=16_000, return_tensors="pt", padding="longest").input_values
    with torch.no_grad():
        logits = model(input_values.to("cuda")).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
batch["transcription"] = transcription
return batch
result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["audio"])
print("WER:", wer(result["text"], result["transcription"]))
```
*Result (WER)*:
| "clean" | "other" |
|---|---|
| 2.77 | 7.08 | |
mwong/roberta-base-climate-evidence-related | 316363dbe901a92c2e19a39a89f88d4f0ae17fc0 | 2022-06-24T03:34:04.000Z | [
"pytorch",
"roberta",
"text-classification",
"en",
"dataset:mwong/fever-evidence-related",
"dataset:mwong/climate-evidence-related",
"transformers",
"text classification",
"fact checking",
"license:mit"
] | text-classification | false | mwong | null | mwong/roberta-base-climate-evidence-related | 4 | 1 | transformers | 19,353 | ---
language: en
license: mit
tags:
- text classification
- fact checking
datasets:
- mwong/fever-evidence-related
- mwong/climate-evidence-related
widget:
- text: "Earth’s changing climate is a critical issue and poses the risk of significant environmental, social and economic disruptions around the globe.</s></s>Because of fears of climate change and adverse effects of drilling explosions and oil spills in the Gulf of Mexico, legislation has been considered, and governmental regulations and orders have been issued, which, combined with the local economic and employment conditions caused by both, could materially adversely impact the oil and gas industries and the economic health of areas in which a significant number of our stores are located."
example_title: "Evidence related to claim"
metrics: f1
---
# ClimateRoberta
ClimateRoberta is a classifier that predicts whether a piece of climate-related evidence is related to a query claim. The model achieved an F1 score of 80.13% on the test dataset "mwong/climate-evidence-related". Starting from the pretrained roberta-base model, the classification head is trained on the Fever dataset and then adapted to the climate domain using the ClimateFever dataset. A minimal usage sketch is shown below.
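The sketch scores a claim/evidence pair with the standard sequence-classification API; the label meanings are not hard-coded here because they are an assumption, so check `model.config.id2label` of the actual checkpoint.
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("mwong/roberta-base-climate-evidence-related")
model = AutoModelForSequenceClassification.from_pretrained("mwong/roberta-base-climate-evidence-related")

claim = "Earth's changing climate poses the risk of significant disruptions around the globe."
evidence = "Because of fears of climate change, legislation has been considered that could impact the oil and gas industries."

# encode the pair as claim </s></s> evidence, matching the widget example above
inputs = tokenizer(claim, evidence, return_tensors="pt", truncation=True)
with torch.no_grad():
    probs = model(**inputs).logits.softmax(-1)

# label names come from the checkpoint config (an assumption, not verified here)
print({model.config.id2label[i]: float(p) for i, p in enumerate(probs[0])})
```
|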
Jeevesh8/feather_berts_0 | 5aadd91f02fbfcf72850e05e830276ed6867e000 | 2022-04-20T13:11:48.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_0 | 4 | null | transformers | 19,354 | Entry not found |
Jeevesh8/feather_berts_1 | 18dfb7fbccbe74daeb79fb85fa733a978216888f | 2022-04-20T13:13:34.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_1 | 4 | null | transformers | 19,355 | Entry not found |
Jeevesh8/feather_berts_2 | 0d7a8ce3b7b2f6c503cf2f95f4a2db126f08b5de | 2022-04-20T13:13:57.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_2 | 4 | null | transformers | 19,356 | Entry not found |
Jeevesh8/feather_berts_3 | bee63da864326930d7b54dd901807970563f1284 | 2022-04-20T13:14:23.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_3 | 4 | null | transformers | 19,357 | Entry not found |
Jeevesh8/feather_berts_4 | 647d7193902b7b1cf1419003355b2492672eb29c | 2022-04-20T13:14:47.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_4 | 4 | null | transformers | 19,358 | Entry not found |
Jeevesh8/feather_berts_5 | cb092e34a2757d68e9a5988cd7edcfa3845a9cb7 | 2022-04-20T13:15:12.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_5 | 4 | null | transformers | 19,359 | Entry not found |
Jeevesh8/feather_berts_6 | 2c61c43682042708a1594dc246ce81ffe982a7ea | 2022-04-20T13:15:38.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_6 | 4 | null | transformers | 19,360 | Entry not found |
Jeevesh8/feather_berts_7 | e5e717255fbedffae6f9e68da102ebc551343dcd | 2022-04-20T13:16:03.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_7 | 4 | null | transformers | 19,361 | Entry not found |
Jeevesh8/feather_berts_8 | 39e7b694ed2a10ae89f9802eb0b0d71cf1d8a06c | 2022-04-20T13:16:28.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_8 | 4 | null | transformers | 19,362 | Entry not found |
Jeevesh8/feather_berts_9 | eb5465710b5050b04880d14e37d70e048cb749d4 | 2022-04-20T13:16:54.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_9 | 4 | null | transformers | 19,363 | Entry not found |
Jeevesh8/feather_berts_10 | 38b9d9d39d327c0a809635816a6a4cdccb672c24 | 2022-04-20T13:17:19.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_10 | 4 | null | transformers | 19,364 | Entry not found |
Jeevesh8/feather_berts_11 | 869d38d71e9a1437240d889e76b9efbff3406845 | 2022-04-20T13:17:44.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_11 | 4 | null | transformers | 19,365 | Entry not found |
Jeevesh8/feather_berts_12 | 09d235ef03582af82d0b30782bbddecfcbfd1d96 | 2022-04-20T13:18:09.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_12 | 4 | null | transformers | 19,366 | Entry not found |
Jeevesh8/feather_berts_13 | 8f72900b9c84fafd3f10b1473af59bdd26a16dd6 | 2022-04-20T13:18:34.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_13 | 4 | null | transformers | 19,367 | Entry not found |
Jeevesh8/feather_berts_14 | adecbdb764d90a5ff0b88af390dde7e6713f4a61 | 2022-04-20T13:19:00.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_14 | 4 | null | transformers | 19,368 | Entry not found |
Jeevesh8/feather_berts_15 | 19cb407828e8275b34132ab151f1053c652799b4 | 2022-04-20T13:19:25.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_15 | 4 | null | transformers | 19,369 | Entry not found |
Jeevesh8/feather_berts_16 | 4005b4ca83120fff587adffd11f097b7c4ad3fb7 | 2022-04-20T13:19:50.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_16 | 4 | null | transformers | 19,370 | Entry not found |
Jeevesh8/feather_berts_17 | 659c977c709f0004371454023e406de432bacbc6 | 2022-04-20T13:20:15.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_17 | 4 | null | transformers | 19,371 | Entry not found |
Jeevesh8/feather_berts_18 | 2bd597915fa00be232f0cb6ff0fb33b8584bc46e | 2022-04-20T13:20:41.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_18 | 4 | null | transformers | 19,372 | Entry not found |
Jeevesh8/feather_berts_21 | e5860302af32921eca05a2d7e13c48f1fd98c22b | 2022-04-20T13:21:58.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_21 | 4 | null | transformers | 19,373 | Entry not found |
Jeevesh8/feather_berts_22 | 9d6be3df9b89cb818d603484b10453a85cfed7f1 | 2022-04-20T13:22:23.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_22 | 4 | null | transformers | 19,374 | Entry not found |
Jeevesh8/feather_berts_23 | 2de9d163b3cf2f7ca1ccea91d5774e075e54c7b0 | 2022-04-20T13:22:49.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_23 | 4 | null | transformers | 19,375 | Entry not found |
Jeevesh8/feather_berts_24 | 63a2d58abfa0ec495533ec46b604622bc87047ca | 2022-04-20T13:23:15.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_24 | 4 | null | transformers | 19,376 | Entry not found |
Jeevesh8/feather_berts_25 | a7c9f20e5983376b1912c3e27fdd0d92bc730fc1 | 2022-04-20T13:23:41.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_25 | 4 | null | transformers | 19,377 | Entry not found |
Jeevesh8/feather_berts_26 | d0efe1e6ca3d19e911f6d20c23f55ab8bb3620e0 | 2022-04-20T13:24:07.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_26 | 4 | null | transformers | 19,378 | Entry not found |
Jeevesh8/feather_berts_27 | 63b9057d703044188f9f2fe1387d4709309e9ed5 | 2022-04-20T13:24:33.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_27 | 4 | null | transformers | 19,379 | Entry not found |
Jeevesh8/feather_berts_29 | 46a14ceee12fded8faa8c1b98496e0002ab42efb | 2022-04-20T13:25:26.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_29 | 4 | null | transformers | 19,380 | Entry not found |
Jeevesh8/feather_berts_30 | db62c83484600bf1a8c9ea4564f3497181b6639f | 2022-04-20T13:25:51.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_30 | 4 | null | transformers | 19,381 | Entry not found |
Jeevesh8/feather_berts_31 | 63f55192fbe42b9fdb4b7939c1feec7ffe5180c0 | 2022-04-20T13:26:16.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_31 | 4 | null | transformers | 19,382 | Entry not found |
Jeevesh8/feather_berts_32 | bbdc3955dfd72cd07657cd2d2945d5b4b488ace4 | 2022-04-20T13:26:42.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_32 | 4 | null | transformers | 19,383 | Entry not found |
Jeevesh8/feather_berts_33 | fd1c3c86b9fe6b559e81a8f289ebe87373c6e199 | 2022-04-20T13:27:08.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_33 | 4 | null | transformers | 19,384 | Entry not found |
Jeevesh8/feather_berts_34 | d82104bb835981083d3192de1910964f2895633e | 2022-04-20T13:27:34.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_34 | 4 | null | transformers | 19,385 | Entry not found |
Jeevesh8/feather_berts_35 | dfc7503b2398cbcaed7fb6bc5284ea8fd6198fd9 | 2022-04-20T13:27:59.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_35 | 4 | null | transformers | 19,386 | Entry not found |
Jeevesh8/feather_berts_36 | d8f65400b7de9c39d59055b03c9b5165326caab2 | 2022-04-20T13:28:25.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_36 | 4 | null | transformers | 19,387 | Entry not found |
Jeevesh8/feather_berts_37 | d24f0e1fb26c372944b8ad1d5ef8c5a3cb734165 | 2022-04-20T13:28:50.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_37 | 4 | null | transformers | 19,388 | Entry not found |
Jeevesh8/feather_berts_38 | 6741e33f6aec5b8d74bed0c3064ad6e31f2268b1 | 2022-04-20T13:29:16.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_38 | 4 | null | transformers | 19,389 | Entry not found |
Jeevesh8/feather_berts_39 | 57c0bb4a8e73b8f9b9eef18b4f399e639949f198 | 2022-04-20T13:29:42.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_39 | 4 | null | transformers | 19,390 | Entry not found |
Jeevesh8/feather_berts_40 | 3b1150c55eb19d55bc0b3c64b47c00099c2b2510 | 2022-04-20T13:30:08.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_40 | 4 | null | transformers | 19,391 | Entry not found |
Jeevesh8/feather_berts_41 | 91d553b0603463f923c795a95cf0f6a702230a2d | 2022-04-20T13:30:32.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_41 | 4 | null | transformers | 19,392 | Entry not found |
Jeevesh8/feather_berts_42 | d0fbc3c87dec977a9218250c542e7225df1493f3 | 2022-04-20T13:30:58.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_42 | 4 | null | transformers | 19,393 | Entry not found |
Jeevesh8/feather_berts_43 | e551dc9bb18261afd90c4c3213231dba8ab9e72f | 2022-04-20T13:31:24.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_43 | 4 | null | transformers | 19,394 | Entry not found |
Jeevesh8/feather_berts_45 | 6552ec31ffc9626a2aeefcf75643f2007bfa62cd | 2022-04-20T13:32:16.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_45 | 4 | null | transformers | 19,395 | Entry not found |
Jeevesh8/feather_berts_46 | 601d1cc8623516e0a31bbde28d5588a6698f86c6 | 2022-04-20T13:32:42.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_46 | 4 | null | transformers | 19,396 | Entry not found |
Jeevesh8/feather_berts_47 | faa9f53f2d3999a82f77c710b442437dc06fc97d | 2022-04-20T13:33:06.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_47 | 4 | null | transformers | 19,397 | Entry not found |
Jeevesh8/feather_berts_48 | f675cd8bcda20d93a479db049e6aa0907e86b173 | 2022-04-20T13:33:32.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_48 | 4 | null | transformers | 19,398 | Entry not found |
Jeevesh8/feather_berts_49 | ecc3900aaca3aebe2fee4116087870047e88c792 | 2022-04-20T13:33:58.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | false | Jeevesh8 | null | Jeevesh8/feather_berts_49 | 4 | null | transformers | 19,399 | Entry not found |