modelId (string, 4-112 chars) | sha (string, 40 chars) | lastModified (string, 24 chars) | tags (list) | pipeline_tag (string, 29 classes) | private (bool, 1 class) | author (string, 2-38 chars, ⌀) | config (null) | id (string, 4-112 chars) | downloads (float64, 0-36.8M, ⌀) | likes (float64, 0-712, ⌀) | library_name (string, 17 classes) | __index_level_0__ (int64, 0-38.5k) | readme (string, 0-186k chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Jeevesh8/bert_ft_qqp-67 | 29b5b9da2ba50f1e8c8e19ee719caeab86b097e7 | 2022-05-09T12:21:25.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-67 | 8 | null | transformers | 13,400 | Entry not found |
Jeevesh8/bert_ft_qqp-68 | ea801cdbef20e344a83bb2638651cca3c8883e6e | 2022-05-09T12:23:56.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-68 | 8 | null | transformers | 13,401 | Entry not found |
Jeevesh8/bert_ft_qqp-69 | c6cfa483596fe692f6f684a49ba5665bb55eb760 | 2022-05-09T12:26:26.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-69 | 8 | null | transformers | 13,402 | Entry not found |
Jeevesh8/bert_ft_qqp-70 | e40599a4ef7dd7717d55800f2aa9f2f8952a9a15 | 2022-05-09T12:29:01.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-70 | 8 | null | transformers | 13,403 | Entry not found |
Jeevesh8/bert_ft_qqp-71 | a9452ba852eff815697ec7c78bdf33b3c7443021 | 2022-05-09T12:31:35.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-71 | 8 | null | transformers | 13,404 | Entry not found |
Jeevesh8/bert_ft_qqp-72 | fd73c63063aab96a06e17900bdb5e7e24e7e23fb | 2022-05-09T12:34:11.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-72 | 8 | null | transformers | 13,405 | Entry not found |
Jeevesh8/bert_ft_qqp-73 | 31b3ea1346e73db0684603aa3380772f301d3991 | 2022-05-09T12:36:44.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-73 | 8 | null | transformers | 13,406 | Entry not found |
Jeevesh8/bert_ft_qqp-74 | 51faa9fe5fbcd5ad65e361e72c17cf0aa325efed | 2022-05-09T12:39:25.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-74 | 8 | null | transformers | 13,407 | Entry not found |
Jeevesh8/bert_ft_qqp-75 | 52f798447356c2fe222d88e0c7f9a23a10871440 | 2022-05-09T12:41:59.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-75 | 8 | null | transformers | 13,408 | Entry not found |
Jeevesh8/bert_ft_qqp-77 | e626bba19ec8eb76162cc4c48554819303ffc257 | 2022-05-09T12:47:10.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-77 | 8 | null | transformers | 13,409 | Entry not found |
Jeevesh8/bert_ft_qqp-78 | 211c088303d2ddf7116177f3d32c793e69c2a64c | 2022-05-09T12:49:47.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-78 | 8 | null | transformers | 13,410 | Entry not found |
Jeevesh8/bert_ft_qqp-79 | beaabf75a3d843d3e0aa550f6ff28f51b8056a4f | 2022-05-09T12:52:23.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-79 | 8 | null | transformers | 13,411 | Entry not found |
Jeevesh8/bert_ft_qqp-80 | 53beaa56f7bb1fbca41b882ad5c92c2746334a90 | 2022-05-09T12:54:57.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-80 | 8 | null | transformers | 13,412 | Entry not found |
Jeevesh8/bert_ft_qqp-81 | 8ae053d6f7891efa52b9d1495fc9660be3be4ae8 | 2022-05-09T12:57:30.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-81 | 8 | null | transformers | 13,413 | Entry not found |
Jeevesh8/bert_ft_qqp-82 | 14f87f86fbf9cacebb71b6617553048350f8fff9 | 2022-05-09T13:00:04.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-82 | 8 | null | transformers | 13,414 | Entry not found |
Jeevesh8/bert_ft_qqp-84 | 0db21e468244121343d614222e273e02c6c26f8c | 2022-05-09T13:05:12.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-84 | 8 | null | transformers | 13,415 | Entry not found |
Jeevesh8/bert_ft_qqp-85 | f22f7c696198eb54f231a3658b9d49eac348cbdd | 2022-05-09T13:07:44.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-85 | 8 | null | transformers | 13,416 | Entry not found |
Jeevesh8/bert_ft_qqp-87 | d0516e2f71fef48bc9fa33fe1fe7a718b3c4035b | 2022-05-09T13:12:48.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-87 | 8 | null | transformers | 13,417 | Entry not found |
Jeevesh8/bert_ft_qqp-88 | f64a81c42aa5d45aadecf4b699fbf5a5b3a38487 | 2022-05-09T13:15:20.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-88 | 8 | null | transformers | 13,418 | Entry not found |
Jeevesh8/bert_ft_qqp-89 | d99247f37677c5603f523c18a50bbf991d38e8e9 | 2022-05-09T13:17:55.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-89 | 8 | null | transformers | 13,419 | Entry not found |
Jeevesh8/bert_ft_qqp-90 | 62a7735ab1a255c4cedd630700ac3d2640cddd40 | 2022-05-09T13:20:28.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-90 | 8 | null | transformers | 13,420 | Entry not found |
Jeevesh8/bert_ft_qqp-91 | b37b1853de735a0c7a9c88e3c6f05269e721e1fe | 2022-05-09T13:22:59.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-91 | 8 | null | transformers | 13,421 | Entry not found |
Jeevesh8/bert_ft_qqp-92 | 4660394cec78461b551822615881290dc63f45a0 | 2022-05-09T13:25:31.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-92 | 8 | null | transformers | 13,422 | Entry not found |
Jeevesh8/bert_ft_qqp-93 | 1121e55fce9a3a7467777364bd3713056dabcf29 | 2022-05-09T13:28:05.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-93 | 8 | null | transformers | 13,423 | Entry not found |
Jeevesh8/bert_ft_qqp-94 | 6b26e4fa2d72f9a96c5c4311670729adfad6afc2 | 2022-05-09T13:30:41.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-94 | 8 | null | transformers | 13,424 | Entry not found |
Jeevesh8/bert_ft_qqp-95 | e12a0ad5d00bc66cad9c20b742e7c997b25f5ffb | 2022-05-09T13:33:16.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-95 | 8 | null | transformers | 13,425 | Entry not found |
Jeevesh8/bert_ft_qqp-96 | d1268808d2d1e1103df6f471b051572ffbbc668e | 2022-05-09T13:35:45.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-96 | 8 | null | transformers | 13,426 | Entry not found |
Jeevesh8/bert_ft_qqp-98 | 2414c7f8926b61222a6d1ba0f0f0116f9c217a18 | 2022-05-09T13:40:55.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/bert_ft_qqp-98 | 8 | null | transformers | 13,427 | Entry not found |
Nakul24/RoBERTa-Goemotions-6 | 145bc199c418598a0a474a4dd613569af0ac556d | 2022-05-10T00:18:06.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
]
| text-classification | false | Nakul24 | null | Nakul24/RoBERTa-Goemotions-6 | 8 | 1 | transformers | 13,428 | Entry not found |
akozlo/con_gpt_med | b8ea2c46df1bdc255908f9fd046c5959b13a0534 | 2022-05-10T12:52:01.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"model-index"
]
| text-generation | false | akozlo | null | akozlo/con_gpt_med | 8 | null | transformers | 13,429 | ---
tags:
- generated_from_trainer
model-index:
- name: con_gpt_med_model
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# con_gpt_med_model
This model is a fine-tuned version of [gpt2-medium](https://huggingface.co/gpt2-medium) on an unknown dataset.
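A minimal, hedged usage sketch (the prompt and sampling settings below are illustrative and not taken from this card):
```python
from transformers import pipeline

# Text-generation pipeline over the fine-tuned GPT-2 medium checkpoint from this card.
generator = pipeline("text-generation", model="akozlo/con_gpt_med")

# Illustrative prompt and sampling settings.
print(generator("In a statement released today,", max_new_tokens=40, do_sample=True)[0]["generated_text"])
```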
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2.0
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
hello
|
aakorolyova/primary_outcome_extraction | 3ac285a05fac48e493543bef239da2e59775a68f | 2022-05-25T19:31:14.000Z | [
"pytorch",
"tf",
"bert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | false | aakorolyova | null | aakorolyova/primary_outcome_extraction | 8 | null | transformers | 13,430 | <h1>Model description</h1>
This is a fine-tuned BioBERT model for extracting primary outcomes from articles reporting clinical trials.
This is the second version of the model; the original model development was reported in:
Anna Koroleva, Sanjay Kamath, Patrick Paroubek. Extracting primary and reported outcomes from articles reporting randomized controlled trials using pre-trained deep language representations. Preprint: https://easychair.org/publications/preprint/qpml
The original work was conducted within the scope of the Assisted authoring for avoiding inadequate claims in scientific reporting PhD project of the Methods for Research on Research (MiRoR, http://miror-ejd.eu/) program.
Model creator: Anna Koroleva
<h1>Intended uses & limitations</h1>
The model is intended to be used for extracting primary outcomes from texts of clinical trials.
The main limitation is that the model was trained on a fairly small (2000 sentences) sample of data annotated by a single annotator. Annotating more data or involving more annotators was not possible within the PhD project.
Another possible issue with using the model is the complex nature of outcomes: a typical description of an outcome can include the outcome name, measurement tool and timepoints, e.g. "Health-Related Quality of Life at 12 months, measured using the Assessment of Quality of Life instrument". Ideally, this should be broken into 3 separate entities ("Health-Related Quality of Life" - outcome, "at 12 months" - timepoint, "the Assessment of Quality of Life instrument" - measurement tool), and relations between the three should be extracted to capture all the outcome-related information. However, in our annotation we annotated this type of example as a single outcome entity.
<h1>How to use</h1>
The model should be used with the BioBERT tokeniser. A sample code for getting model predictions is below:
```python
import numpy as np
from transformers import AutoTokenizer
from transformers import AutoModelForTokenClassification
tokenizer = AutoTokenizer.from_pretrained('dmis-lab/biobert-v1.1')
model = AutoModelForTokenClassification.from_pretrained(r'aakorolyova/primary_outcome_extraction')
text = 'Primary endpoints were overall survival in patients with oesophageal squamous cell carcinoma and PD-L1 combined positive score (CPS) of 10 or more, and overall survival and progression-free survival in patients with oesophageal squamous cell carcinoma, PD-L1 CPS of 10 or more, and in all randomised patients.'
encoded_input = tokenizer(text, padding=True, truncation=True, max_length=2000, return_tensors='pt')
output = model(**encoded_input)['logits']
output = np.argmax(output.detach().numpy(), axis=2)
print(output)
```
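To make the numeric predictions easier to read, the label ids can be mapped back to tokens and label names via the model config (a short sketch; it assumes the label names stored in `model.config.id2label` are the ones used during fine-tuning):
```python
# Map each predicted label id back to its token and label name (id2label comes from the model config).
tokens = tokenizer.convert_ids_to_tokens(encoded_input["input_ids"][0].tolist())
labels = [model.config.id2label[int(label_id)] for label_id in output[0]]
for token, label in zip(tokens, labels):
    print(token, label)
```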
Some more useful functions can be found in our GitHub repository: https://github.com/aakorolyova/DeSpin-2.0
<h1>Training data</h1>
Training data can be found in https://github.com/aakorolyova/DeSpin-2.0/tree/main/data/Primary_Outcomes
<h1>Training procedure</h1>
The model was fine-tuned using Huggingface Trainer API. Training scripts can be found in https://github.com/aakorolyova/DeSpin-2.0
<h1>Evaluation</h1>
Precision: 74.41%
Recall: 88.7%
F1: 80.93%
|
ruselkomp/sber-full-framebank | 4d1afd8a15cb001f8a3d635eb209eaea249a7fe0 | 2022-05-12T21:32:41.000Z | [
"pytorch",
"tensorboard",
"bert",
"question-answering",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
]
| question-answering | false | ruselkomp | null | ruselkomp/sber-full-framebank | 8 | null | transformers | 13,431 | ---
tags:
- generated_from_trainer
model-index:
- name: tests-finetuned-squad-full
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# tests-finetuned-squad-full
This model is a fine-tuned version of [sberbank-ai/sbert_large_nlu_ru](https://huggingface.co/sberbank-ai/sbert_large_nlu_ru) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.5672
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.0601 | 1.0 | 11307 | 1.0849 |
| 0.6918 | 2.0 | 22614 | 1.1588 |
| 0.4071 | 3.0 | 33921 | 1.5672 |
### Framework versions
- Transformers 4.19.0.dev0
- Pytorch 1.11.0+cu113
- Datasets 2.2.2.dev0
- Tokenizers 0.12.1
|
enoriega/kw_pubmed_1000_0.0003 | 3c6df44aacd8e5a587f786f2aabf0790332f6b48 | 2022-05-10T20:10:43.000Z | [
"pytorch",
"bert",
"fill-mask",
"dataset:keyword_pubmed_dataset",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
]
| fill-mask | false | enoriega | null | enoriega/kw_pubmed_1000_0.0003 | 8 | null | transformers | 13,432 | ---
license: mit
tags:
- generated_from_trainer
datasets:
- keyword_pubmed_dataset
metrics:
- accuracy
model-index:
- name: kw_pubmed_1000_0.0003
results:
- task:
name: Masked Language Modeling
type: fill-mask
dataset:
name: keyword_pubmed_dataset
type: keyword_pubmed_dataset
args: sentence
metrics:
- name: Accuracy
type: accuracy
value: 0.33938523162661094
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# kw_pubmed_1000_0.0003
This model is a fine-tuned version of [microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) on the keyword_pubmed_dataset dataset.
It achieves the following results on the evaluation set:
- Loss: 4.7086
- Accuracy: 0.3394
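A minimal usage sketch for the fill-mask head (the example sentence is illustrative; the mask token is read from the tokenizer):
```python
from transformers import pipeline

# Fill-mask pipeline over the fine-tuned PubMedBERT checkpoint from this card.
fill_mask = pipeline("fill-mask", model="enoriega/kw_pubmed_1000_0.0003")

# Illustrative biomedical sentence using the tokenizer's own mask token.
text = f"The patient was treated with {fill_mask.tokenizer.mask_token} for hypertension."
for prediction in fill_mask(text):
    print(prediction["token_str"], round(prediction["score"], 4))
```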
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 250
- total_train_batch_size: 8000
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log | 0.09 | 4 | 4.3723 | 0.3436 |
| 6.0386 | 0.17 | 8 | 4.2113 | 0.3442 |
| 3.7573 | 0.26 | 12 | 4.2079 | 0.3634 |
| 2.9944 | 0.35 | 16 | 4.3370 | 0.3513 |
| 2.7048 | 0.44 | 20 | 4.8594 | 0.3067 |
| 2.7048 | 0.52 | 24 | 4.4929 | 0.3383 |
| 2.9458 | 0.61 | 28 | 4.5146 | 0.3408 |
| 2.3783 | 0.7 | 32 | 4.5680 | 0.3430 |
| 2.2485 | 0.78 | 36 | 4.5095 | 0.3477 |
| 2.1701 | 0.87 | 40 | 4.4971 | 0.3449 |
| 2.1701 | 0.96 | 44 | 4.7051 | 0.3321 |
| 2.0861 | 1.07 | 48 | 4.7615 | 0.3310 |
| 2.4168 | 1.15 | 52 | 4.7086 | 0.3394 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
|
dragonSwing/xlm-roberta-capu | 0a50bebf0113b2552df5a5513a7ec4fdf5c826d5 | 2022-05-17T15:03:20.000Z | [
"pytorch",
"bert",
"vi",
"dataset:oscar-corpus/OSCAR-2109",
"transformers",
"capitalization",
"punctuation",
"token-classification",
"license:cc-by-sa-4.0"
]
| token-classification | false | dragonSwing | null | dragonSwing/xlm-roberta-capu | 8 | null | transformers | 13,433 | ---
language:
- vi
tags:
- capitalization
- punctuation
- token-classification
license: cc-by-sa-4.0
datasets:
- oscar-corpus/OSCAR-2109
metrics:
- accuracy
- precision
- recall
- f1
---
# ✨ xlm-roberta-capitalization-punctuation
This a [XLM-RoBERTa](https://huggingface.co/xlm-roberta-base) model finetuned for Vietnamese punctuation restoration on the [OSCAR-2109](https://huggingface.co/datasets/oscar-corpus/OSCAR-2109) dataset.
The model predicts the punctuation and upper-casing of plain, lower-cased text. An example use case is ASR output, or other cases where text has lost its punctuation.
This model is intended for direct use as a punctuation restoration model for the general Vietnamese language. Alternatively, you can use this for further fine-tuning on domain-specific texts for punctuation restoration tasks.
The model restores the following punctuation marks: **[. , : ?]**
The model also restores the complex upper-casing of words like *YouTube*, *MobiFone*.
-----------------------------------------------
## 🚋 Usage
**Below is a quick way to get up and running with the model.**
1. Download files from hub
```python
import os
import shutil
import sys
from huggingface_hub import snapshot_download
cache_dir = "./capu"
def download_files(repo_id, cache_dir=None, ignore_regex=None):
download_dir = snapshot_download(repo_id=repo_id, cache_dir=cache_dir, ignore_regex=ignore_regex)
if cache_dir is None or download_dir == cache_dir:
return download_dir
file_names = os.listdir(download_dir)
for file_name in file_names:
shutil.move(os.path.join(download_dir, file_name), cache_dir)
os.rmdir(download_dir)
return cache_dir
cache_dir = download_files(repo_id="dragonSwing/xlm-roberta-capu", cache_dir=cache_dir, ignore_regex=["*.json", "*.bin"])
sys.path.append(cache_dir)
```
2. Sample python code
```python
import os
from gec_model import GecBERTModel
model = GecBERTModel(
vocab_path=os.path.join(cache_dir, "vocabulary"),
model_paths="dragonSwing/xlm-roberta-capu",
split_chunk=True
)
model("theo đó thủ tướng dự kiến tiếp bộ trưởng nông nghiệp mỹ tom wilsack bộ trưởng thương mại mỹ gina raimondo bộ trưởng tài chính janet yellen gặp gỡ thượng nghị sĩ patrick leahy và một số nghị sĩ mỹ khác")
# Always return list of outputs.
# ['Theo đó, Thủ tướng dự kiến tiếp Bộ trưởng Nông nghiệp Mỹ Tom Wilsack, Bộ trưởng Thương mại Mỹ Gina Raimondo, Bộ trưởng Tài chính Janet Yellen, gặp gỡ Thượng nghị sĩ Patrick Leahy và một số nghị sĩ Mỹ khác.']
model("những gói cước năm g mobifone sẽ mang đến cho bạn những trải nghiệm mới lạ trên cả tuyệt vời so với mạng bốn g thì tốc độ truy cập mạng 5 g mobifone được nhận định là siêu đỉnh với mức truy cập nhanh gấp 10 lần")
# ['Những gói cước 5G MobiFone sẽ mang đến cho bạn những trải nghiệm mới lạ trên cả tuyệt vời. So với mạng 4G thì tốc độ truy cập mạng 5G MobiFone được Nhận định là siêu đỉnh với mức truy cập nhanh gấp 10 lần.']
```
**This model can work on arbitrarily large text in Vietnamese language.**
-----------------------------------------------
## 📡 Training data
Here is the number of text samples we used for fine-tuning the model:
| Language | Number of text samples |
| --- | --- |
| Vietnamese | 5,600,000 |
-----------------------------------------------
## 🎯 Accuracy
Below is a breakdown of the performance of the model by each label on 10,000 held-out text samples:
| label | precision | recall | f1-score | support |
| --- | --- | --- | --- | --- |
| **Upper** | 0.89 | 0.90 | 0.89 | 56497 |
| **Complex-Upper** | 0.93 | 0.83 | 0.88 | 480 |
| **.** | 0.81 | 0.84 | 0.82 | 18139 |
| **,** | 0.69 | 0.75 | 0.72 | 22961 |
| **:** | 0.76 | 0.60 | 0.67 | 1432 |
| **?** | 0.82 | 0.75 | 0.78 | 1730 |
| **none** | 0.99 | 0.99 | 0.99 |475611 |
-----------------------------------------------
|
bookbot/wav2vec2-xls-r-adult-child-id-cls | f13a285b291c7f82a7ab4fc6ac2666557234fc3c | 2022-05-12T12:37:20.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"audio-classification",
"id",
"arxiv:2111.09296",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
]
| audio-classification | false | bookbot | null | bookbot/wav2vec2-xls-r-adult-child-id-cls | 8 | null | transformers | 13,434 | ---
language: id
license: apache-2.0
tags:
- audio-classification
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: wav2vec2-xls-r-adult-child-id-cls
results: []
---
# Wav2Vec2 XLS-R Adult/Child Indonesian Speech Classifier
Wav2Vec2 XLS-R Adult/Child Indonesian Speech Classifier is an audio classification model based on the [XLS-R](https://arxiv.org/abs/2111.09296) architecture. This model is a fine-tuned version of [wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on a private adult/child Indonesian speech classification dataset.
This model was trained using HuggingFace's PyTorch framework. All training was done on a Tesla P100, provided by Kaggle. Training metrics were logged via Tensorboard.
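A minimal inference sketch (the audio path is a placeholder; any Indonesian speech clip readable by the pipeline should work):
```python
from transformers import pipeline

# Audio-classification pipeline over the fine-tuned XLS-R checkpoint from this card.
classifier = pipeline("audio-classification", model="bookbot/wav2vec2-xls-r-adult-child-id-cls")

# "speech.wav" is a placeholder path to an Indonesian speech recording.
for prediction in classifier("speech.wav"):
    print(prediction["label"], round(prediction["score"], 4))
```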
## Model
| Model | #params | Arch. | Training/Validation data (text) |
| ----------------------------------- | ------- | ----- | ---------------------------------------------------- |
| `wav2vec2-xls-r-adult-child-id-cls` | 300M | XLS-R | Adult/Child Indonesian Speech Classification Dataset |
## Evaluation Results
The model achieves the following results on evaluation:
| Dataset | Loss | Accuracy | F1 |
| -------------------------------------------- | ------ | -------- | ------ |
| Adult/Child Indonesian Speech Classification | 0.1970 | 93.38% | 0.9307 |
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- `learning_rate`: 3e-05
- `train_batch_size`: 8
- `eval_batch_size`: 8
- `seed`: 42
- `gradient_accumulation_steps`: 4
- `total_train_batch_size`: 32
- `optimizer`: Adam with `betas=(0.9,0.999)` and `epsilon=1e-08`
- `lr_scheduler_type`: linear
- `lr_scheduler_warmup_ratio`: 0.1
- `num_epochs`: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
| :-----------: | :---: | :--: | :-------------: | :------: | :----: |
| 0.336 | 1.0 | 305 | 0.3146 | 0.8845 | 0.8698 |
| 0.2345 | 2.0 | 610 | 0.2140 | 0.9251 | 0.9202 |
| 0.3215 | 3.0 | 915 | 0.2038 | 0.9315 | 0.9286 |
| 0.2059 | 4.0 | 1220 | 0.1970 | 0.9338 | 0.9307 |
## Disclaimer
Do consider the biases which came from pre-training datasets that may be carried over into the results of this model.
## Authors
Wav2Vec2 XLS-R Adult/Child Indonesian Speech Classifier was trained and evaluated by [Ananto Joyoadikusumo](https://anantoj.github.io/). All computation and development are done on Kaggle.
## Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu102
- Datasets 2.2.0
- Tokenizers 0.12.1
|
guhuawuli/gpt2-poem_key_words | 6376e83df01e15831699662ea039dfdf240b949c | 2022-05-12T06:28:26.000Z | [
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
]
| text-generation | false | guhuawuli | null | guhuawuli/gpt2-poem_key_words | 8 | null | transformers | 13,435 | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: gpt2-poem_key_words
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# gpt2-poem_key_words
This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 2.5370
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.9544 | 1.0 | 670 | 2.6296 |
| 2.7014 | 2.0 | 1340 | 2.5557 |
| 2.6035 | 3.0 | 2010 | 2.5370 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.10.0a0+3fd9dcf
- Datasets 2.1.0
- Tokenizers 0.12.1
|
ahujaniharika95/distilbert-base-uncased-finetuned-squad | d2155f4e60abeca9807ac3749fb2e8268b72614e | 2022-06-17T09:25:22.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
]
| question-answering | false | ahujaniharika95 | null | ahujaniharika95/distilbert-base-uncased-finetuned-squad | 8 | null | transformers | 13,436 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
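A minimal usage sketch with the question-answering pipeline (question and context are illustrative):
```python
from transformers import pipeline

# Extractive question answering with the fine-tuned DistilBERT checkpoint from this card.
qa = pipeline("question-answering", model="ahujaniharika95/distilbert-base-uncased-finetuned-squad")

result = qa(
    question="What task was the model fine-tuned for?",
    context="This checkpoint was fine-tuned for extractive question answering on SQuAD-style data.",
)
print(result["answer"], round(result["score"], 4))
```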
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
### Framework versions
- Transformers 4.19.2
- Pytorch 1.11.0
- Datasets 2.2.1
- Tokenizers 0.12.1
|
zhiguoxu/chinese-roberta-wwm-ext-finetuned-token-clasify | 088ddbe516da0270abbcbdce3acd745bb5b605c5 | 2022-05-13T09:43:29.000Z | [
"pytorch",
"bert",
"token-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
]
| token-classification | false | zhiguoxu | null | zhiguoxu/chinese-roberta-wwm-ext-finetuned-token-clasify | 8 | null | transformers | 13,437 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: chinese-roberta-wwm-ext-finetuned-token-clasify
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# chinese-roberta-wwm-ext-finetuned-token-clasify
This model is a fine-tuned version of [hfl/chinese-roberta-wwm-ext](https://huggingface.co/hfl/chinese-roberta-wwm-ext) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0002
- F1: 1.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 6
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:---:|
| 1.2598 | 1.0 | 2 | 0.0999 | 1.0 |
| 0.0714 | 2.0 | 4 | 0.0014 | 1.0 |
| 0.0029 | 3.0 | 6 | 0.0002 | 1.0 |
| 0.0007 | 4.0 | 8 | 0.0002 | 1.0 |
| 0.0004 | 5.0 | 10 | 0.0002 | 1.0 |
| 0.0004 | 6.0 | 12 | 0.0002 | 1.0 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
|
luckydog/bert-base-chinese-finetuned-mosei | fed4e1d45353c3c29b37ba6f8fd269b704918668 | 2022-05-12T15:46:33.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | luckydog | null | luckydog/bert-base-chinese-finetuned-mosei | 8 | null | transformers | 13,438 | Entry not found |
kathywu/DialoGPT-medium-kathy | ec75210e7541fe43adb11fde097d156f37a496bb | 2022-05-13T00:41:24.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | false | kathywu | null | kathywu/DialoGPT-medium-kathy | 8 | null | transformers | 13,439 | ---
tags:
- conversational
---
|
michojan/bert-finetuned-ner | 868ab62b63edd236a52304561006ade8b5046fad | 2022-05-13T13:14:15.000Z | [
"pytorch",
"tensorboard",
"bert",
"token-classification",
"dataset:conll2003",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
]
| token-classification | false | michojan | null | michojan/bert-finetuned-ner | 8 | null | transformers | 13,440 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-finetuned-ner
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: conll2003
type: conll2003
args: conll2003
metrics:
- name: Precision
type: precision
value: 0.9324078664683524
- name: Recall
type: recall
value: 0.9495119488387749
- name: F1
type: f1
value: 0.9408821812724089
- name: Accuracy
type: accuracy
value: 0.9864308000235474
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-finetuned-ner
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0622
- Precision: 0.9324
- Recall: 0.9495
- F1: 0.9409
- Accuracy: 0.9864
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0862 | 1.0 | 1756 | 0.0649 | 0.9193 | 0.9371 | 0.9281 | 0.9831 |
| 0.0406 | 2.0 | 3512 | 0.0576 | 0.9235 | 0.9472 | 0.9352 | 0.9850 |
| 0.0197 | 3.0 | 5268 | 0.0622 | 0.9324 | 0.9495 | 0.9409 | 0.9864 |
### Framework versions
- Transformers 4.19.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.1
- Tokenizers 0.12.1
|
Jeevesh8/6ep_bert_ft_cola-50 | 2f18f513e1530d9db18a9ca030eaf952665ceb99 | 2022-05-14T13:22:23.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/6ep_bert_ft_cola-50 | 8 | null | transformers | 13,441 | Entry not found |
tanviraumi/meeting-minute | 733b0aad5357e49726935f0dc9900b27078d1ec0 | 2022-05-15T07:50:58.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"license:mit",
"autotrain_compatible"
]
| text2text-generation | false | tanviraumi | null | tanviraumi/meeting-minute | 8 | null | transformers | 13,442 | ---
license: mit
---
|
aliosm/sha3bor-general-diacritizer-canine-c | 6bc5fdfecdfe2971d01b9b027835b300d6d3f61f | 2022-05-28T09:41:44.000Z | [
"pytorch",
"canine",
"token-classification",
"ar",
"transformers",
"license:mit",
"autotrain_compatible"
]
| token-classification | false | aliosm | null | aliosm/sha3bor-general-diacritizer-canine-c | 8 | null | transformers | 13,443 | ---
language: ar
license: mit
widget:
- text: "توكلت في رزقي على الله خالقي وأيقنت أن الله لا شك رازقي."
- text: "أي شخص يتوقف عن التعلم هو عجوز، سواء كان في العشرين أو الثمانين."
- text: "الحياة رواية جميلة عليك قراءتها حتى النهاية، لا تتوقف أبدا عند سطر حزين قد تكون النهاية جميلة."
---
|
mriggs/wikisource_epoch1 | e2f6e4061918670e3459478c7e58bd91e0f09ed1 | 2022-05-16T10:01:32.000Z | [
"pytorch",
"flaubert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | false | mriggs | null | mriggs/wikisource_epoch1 | 8 | null | transformers | 13,444 | Entry not found |
YeRyeongLee/mental-bert-base-uncased-masked_finetuned-0517 | e077c2b0fa6e26f9ccf9393068fffe8928d7542a | 2022-05-17T08:14:26.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers",
"generated_from_trainer",
"model-index"
]
| text-classification | false | YeRyeongLee | null | YeRyeongLee/mental-bert-base-uncased-masked_finetuned-0517 | 8 | null | transformers | 13,445 | ---
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: mental-bert-base-uncased-masked_finetuned-0517
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mental-bert-base-uncased-masked_finetuned-0517
This model is a fine-tuned version of [mental/mental-bert-base-uncased](https://huggingface.co/mental/mental-bert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5217
- Accuracy: 0.917
- F1: 0.9171
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|
| No log | 1.0 | 3000 | 0.2922 | 0.8993 | 0.8997 |
| No log | 2.0 | 6000 | 0.3964 | 0.9063 | 0.9069 |
| No log | 3.0 | 9000 | 0.4456 | 0.9197 | 0.9197 |
| No log | 4.0 | 12000 | 0.5217 | 0.917 | 0.9171 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.9.0
- Datasets 1.16.1
- Tokenizers 0.10.3
|
JoanTirant/bert-finetuned-ner-accelerate | 8e7876fe0d030f5c05dd331ad1b920918560ce85 | 2022-05-17T10:50:50.000Z | [
"pytorch",
"bert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | false | JoanTirant | null | JoanTirant/bert-finetuned-ner-accelerate | 8 | null | transformers | 13,446 | Entry not found |
CEBaB/lstm.CEBaB.absa.inclusive.seed_99 | 19d91574aa71d5bdc374299a809df771dbebc632 | 2022-05-18T01:00:42.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | CEBaB | null | CEBaB/lstm.CEBaB.absa.inclusive.seed_99 | 8 | null | transformers | 13,447 | Entry not found |
alk/pegasus-cnn_dailymail_2 | 30d349d5ede53ddb05d341590f760438a6ad1d90 | 2022-05-19T20:13:04.000Z | [
"pytorch",
"tensorboard",
"pegasus",
"text2text-generation",
"dataset:cnn_dailymail",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
]
| text2text-generation | false | alk | null | alk/pegasus-cnn_dailymail_2 | 8 | null | transformers | 13,448 | ---
tags:
- generated_from_trainer
datasets:
- cnn_dailymail
model-index:
- name: pegasus-cnn_dailymail_2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pegasus-cnn_dailymail_2
This model is a fine-tuned version of [google/pegasus-cnn_dailymail](https://huggingface.co/google/pegasus-cnn_dailymail) on the cnn_dailymail dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4308
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.5344 | 0.6 | 500 | 1.4497 |
| 1.5068 | 1.2 | 1000 | 1.4386 |
| 1.4983 | 1.8 | 1500 | 1.4315 |
| 1.389 | 2.39 | 2000 | 1.4308 |
| 1.4246 | 2.99 | 2500 | 1.4277 |
| 1.3656 | 3.59 | 3000 | 1.4308 |
### Framework versions
- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.1
- Tokenizers 0.12.1
|
PontifexMaximus/TurkishTranslator | c64789b6e638d061f3797210fb8a51754ee7a43c | 2022-05-30T22:22:55.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"dataset:opus_infopankki",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
]
| text2text-generation | false | PontifexMaximus | null | PontifexMaximus/TurkishTranslator | 8 | 1 | transformers | 13,449 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- opus_infopankki
metrics:
- bleu
model-index:
- name: opus-mt-tr-en-finetuned-tr-to-en
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: opus_infopankki
type: opus_infopankki
args: en-tr
metrics:
- name: Bleu
type: bleu
value: 54.7617
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# opus-mt-tr-en-finetuned-tr-to-en
This model is a fine-tuned version of [Helsinki-NLP/opus-mt-tr-en](https://huggingface.co/Helsinki-NLP/opus-mt-tr-en) on the opus_infopankki dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6924
- Bleu: 54.7617
- Gen Len: 13.5501
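A minimal usage sketch for Turkish-to-English translation (the example sentence is illustrative):
```python
from transformers import pipeline

# Marian-based Turkish-to-English translator from this card.
translator = pipeline("translation", model="PontifexMaximus/TurkishTranslator")

print(translator("Bugün hava çok güzel.")[0]["translation_text"])
```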
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-06
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 16
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| No log | 1.0 | 412 | 1.1776 | 43.3104 | 12.9297 |
| 1.4032 | 2.0 | 824 | 1.0750 | 45.7912 | 12.9155 |
| 1.2268 | 3.0 | 1236 | 1.0019 | 47.6255 | 12.9251 |
| 1.141 | 4.0 | 1648 | 0.9411 | 49.0649 | 12.9302 |
| 1.0651 | 5.0 | 2060 | 0.8929 | 50.4894 | 12.9066 |
| 1.0651 | 6.0 | 2472 | 0.8519 | 51.5072 | 12.9067 |
| 1.0025 | 7.0 | 2884 | 0.8180 | 52.5035 | 12.8875 |
| 0.9582 | 8.0 | 3296 | 0.7893 | 51.7587 | 13.5338 |
| 0.9173 | 9.0 | 3708 | 0.7655 | 52.3566 | 13.5376 |
| 0.8892 | 10.0 | 4120 | 0.7449 | 53.0488 | 13.5545 |
| 0.8639 | 11.0 | 4532 | 0.7285 | 53.5965 | 13.5539 |
| 0.8639 | 12.0 | 4944 | 0.7152 | 53.9433 | 13.5547 |
| 0.8424 | 13.0 | 5356 | 0.7053 | 54.2509 | 13.5502 |
| 0.8317 | 14.0 | 5768 | 0.6981 | 54.5339 | 13.5502 |
| 0.817 | 15.0 | 6180 | 0.6938 | 54.7068 | 13.5448 |
| 0.8155 | 16.0 | 6592 | 0.6924 | 54.7617 | 13.5501 |
### Framework versions
- Transformers 4.19.2
- Pytorch 1.7.1+cu110
- Datasets 2.2.2
- Tokenizers 0.12.1
|
dyyyyyyyy/XTREME_squad_XLM-RoBERTa-large | c503692d9782c6bc061be40f60e781852091c387 | 2022-05-19T07:31:02.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | false | dyyyyyyyy | null | dyyyyyyyy/XTREME_squad_XLM-RoBERTa-large | 8 | null | transformers | 13,450 | Entry not found |
nreimers/mmarco-mMiniLMv2-L12-H384-v1 | 565e5ab4e1b6ce919492ed0c02703c276e729057 | 2022-05-20T07:40:57.000Z | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
]
| text-classification | false | nreimers | null | nreimers/mmarco-mMiniLMv2-L12-H384-v1 | 8 | null | transformers | 13,451 | Entry not found |
ragarwal/deberta-v3-base-nli-mixer-binary | e019e3b9cbfa2c328a04493981091cac297795a0 | 2022-05-20T10:38:28.000Z | [
"pytorch",
"deberta-v2",
"text-classification",
"transformers",
"license:mit"
]
| text-classification | false | ragarwal | null | ragarwal/deberta-v3-base-nli-mixer-binary | 8 | null | transformers | 13,452 | ---
license: mit
---
**NLI-Mixer** is an attempt to tackle the Natural Language Inference (NLI) task by mixing multiple datasets together.
The approach is simple:
1. Combine all available NLI data without any domain-dependent re-balancing or re-weighting.
2. Finetune several SOTA transformers of different sizes (20m parameters to 300m parameters) on the combined data.
3. Evaluate on challenging NLI datasets.
This model was trained using the [SentenceTransformers](https://sbert.net) [Cross-Encoder](https://www.sbert.net/examples/applications/cross-encoder/README.html) class. It is based on [microsoft/deberta-v3-base](https://huggingface.co/microsoft/deberta-v3-base).
### Data
20+ NLI datasets were combined to train a binary classification model. The `contradiction` and `neutral` labels were combined to form a `non-entailment` class.
### Usage
In Transformers
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from torch.nn.functional import softmax, sigmoid
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name="ragarwal/deberta-v3-base-nli-mixer-binary"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
sentence = "During its monthly call, the National Oceanic and Atmospheric Administration warned of \
increased temperatures and low precipitation"
labels = ["Computer", "Climate Change", "Tablet", "Football", "Artificial Intelligence", "Global Warming"]
features = tokenizer([[sentence, l] for l in labels], padding=True, truncation=True, return_tensors="pt")
model.eval()
with torch.no_grad():
scores = model(**features).logits
print("Multi-Label:", sigmoid(scores)) #Multi-Label Classification
print("Single-Label:", softmax(scores, dim=0)) #Single-Label Classification
#Multi-Label: tensor([[0.0412],[0.2436],[0.0394],[0.0020],[0.0050],[0.1424]])
#Single-Label: tensor([[0.0742],[0.5561],[0.0709],[0.0035],[0.0087],[0.2867]])
```
In Sentence-Transformers
```python
from sentence_transformers import CrossEncoder
model_name="ragarwal/deberta-v3-base-nli-mixer-binary"
model = CrossEncoder(model_name, max_length=256)
sentence = "During its monthly call, the National Oceanic and Atmospheric Administration warned of \
increased temperatures and low precipitation"
labels = ["Computer", "Climate Change", "Tablet", "Football", "Artificial Intelligence", "Global Warming"]
scores = model.predict([[sentence, l] for l in labels])
print(scores)
#array([0.04118565, 0.2435827 , 0.03941465, 0.00203637, 0.00501176, 0.1423797], dtype=float32)
``` |
domischwimmbeck/bert-base-german-cased-20000-ner-uncased | d8c5ae4b1858aa7ba46589b09086df79e5f820c8 | 2022-05-20T13:45:45.000Z | [
"pytorch",
"tensorboard",
"bert",
"token-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
]
| token-classification | false | domischwimmbeck | null | domischwimmbeck/bert-base-german-cased-20000-ner-uncased | 8 | null | transformers | 13,453 | ---
license: mit
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-base-german-cased-20000-ner-uncased
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-german-cased-20000-ner-uncased
This model is a fine-tuned version of [dbmdz/bert-base-german-uncased](https://huggingface.co/dbmdz/bert-base-german-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0664
- Precision: 0.9061
- Recall: 0.8697
- F1: 0.8875
- Accuracy: 0.9838
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 0.11 | 64 | 0.1129 | 0.8012 | 0.8547 | 0.8271 | 0.9729 |
| No log | 0.23 | 128 | 0.0879 | 0.7882 | 0.8426 | 0.8145 | 0.9771 |
| No log | 0.34 | 192 | 0.0662 | 0.8711 | 0.8523 | 0.8616 | 0.9815 |
| No log | 0.45 | 256 | 0.0627 | 0.8839 | 0.8553 | 0.8694 | 0.9820 |
| No log | 0.57 | 320 | 0.0669 | 0.8677 | 0.8709 | 0.8693 | 0.9806 |
| No log | 0.68 | 384 | 0.0568 | 0.8669 | 0.8685 | 0.8677 | 0.9823 |
| No log | 0.79 | 448 | 0.0620 | 0.9066 | 0.8631 | 0.8843 | 0.9827 |
| 0.0861 | 0.9 | 512 | 0.0603 | 0.8743 | 0.8859 | 0.8801 | 0.9829 |
| 0.0861 | 1.02 | 576 | 0.0552 | 0.8983 | 0.8697 | 0.8837 | 0.9845 |
| 0.0861 | 1.13 | 640 | 0.0563 | 0.9007 | 0.8823 | 0.8914 | 0.9847 |
| 0.0861 | 1.24 | 704 | 0.0605 | 0.8683 | 0.8829 | 0.8755 | 0.9834 |
| 0.0861 | 1.36 | 768 | 0.0547 | 0.9199 | 0.8895 | 0.9044 | 0.9857 |
| 0.0861 | 1.47 | 832 | 0.0585 | 0.9159 | 0.8703 | 0.8925 | 0.9845 |
| 0.0861 | 1.58 | 896 | 0.0601 | 0.8818 | 0.8871 | 0.8844 | 0.9834 |
| 0.0861 | 1.7 | 960 | 0.0664 | 0.9061 | 0.8697 | 0.8875 | 0.9838 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.9.0+cu111
- Datasets 2.1.0
- Tokenizers 0.12.1
|
north/t5_large_NCC_lm | 622a04c879e35d3b0ea677f4278d85627ade6bf4 | 2022-06-01T19:41:16.000Z | [
"pytorch",
"tf",
"jax",
"tensorboard",
"t5",
"text2text-generation",
"no",
"nn",
"sv",
"dk",
"is",
"en",
"dataset:nbailab/NCC",
"dataset:mc4",
"dataset:wikipedia",
"arxiv:2104.09617",
"arxiv:1910.10683",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| text2text-generation | false | north | null | north/t5_large_NCC_lm | 8 | null | transformers | 13,454 | ---
language:
- no
- nn
- sv
- dk
- is
- en
datasets:
- nbailab/NCC
- mc4
- wikipedia
widget:
- text: <extra_id_0> hver uke samles Regjeringens medlemmer til Statsråd på <extra_id_1>. Dette organet er øverste <extra_id_2> i Norge. For at møtet skal være <extra_id_3>, må over halvparten av regjeringens <extra_id_4> være til stede.
- text: På <extra_id_0> kan man <extra_id_1> en bok, og man kan også <extra_id_2> seg ned og lese den.
license: apache-2.0
---
# North-T5
The North-T5-models are a set of Norwegian sequence-to-sequence-models. It builds upon the flexible [T5](https://github.com/google-research/text-to-text-transfer-transformer) and [T5X](https://github.com/google-research/t5x) and can be used for a variety of NLP tasks ranging from classification to translation.
| |**Small** <br />_60M_|**Base** <br />_220M_|**Large** <br />_770M_|**XL** <br />_3B_|**XXL** <br />_11B_|
|:-----------|:------------:|:------------:|:------------:|:------------:|:------------:|
|North-T5‑NCC|[🤗](https://huggingface.co/north/t5_small_NCC)|[🤗](https://huggingface.co/north/t5_base_NCC)|[🤗](https://huggingface.co/north/t5_large_NCC)|[🤗](https://huggingface.co/north/t5_xl_NCC)|[🤗](https://huggingface.co/north/t5_xxl_NCC)||
|North-T5‑NCC‑lm|[🤗](https://huggingface.co/north/t5_small_NCC_lm)|[🤗](https://huggingface.co/north/t5_base_NCC_lm)|✔|[🤗](https://huggingface.co/north/t5_xl_NCC_lm)|[🤗](https://huggingface.co/north/t5_xxl_NCC_lm)||
## T5X Checkpoint
The original T5X checkpoint is also available for this model in the [Google Cloud Bucket](gs://north-t5x/pretrained_models/large/norwegian_NCC_plus_English_pluss100k_lm_t5x_large/).
## Performance
A thorough evaluation of the North-T5 models is planned, and I strongly recommend external researchers to make their own evaluation. The main advantage of the T5-models is their flexibility. Traditionally, encoder-only models (like BERT) excel in classification tasks, while seq-2-seq models are easier to train for tasks like translation and Q&A. Despite this, here are the results from using North-T5 on the political classification task explained [here](https://arxiv.org/abs/2104.09617).
|**Model:** | **F1** |
|:-----------|:------------|
|mT5-base|73.2 |
|mBERT-base|78.4 |
|NorBERT-base|78.2 |
|North-T5-small|80.5 |
|nb-bert-base|81.8 |
|North-T5-base|85.3 |
|North-T5-large|86.7 |
|North-T5-xl|88.7 |
|North-T5-xxl|91.8|
These are preliminary results. The [results](https://arxiv.org/abs/2104.09617) from the BERT-models are based on the test-results from the best model after 10 runs with early stopping and a decaying learning rate. The T5-results are the average of five runs on the evaluation set. The small-model was trained for 10.000 steps, while the rest for 5.000 steps. A fixed learning rate was used (no decay), and no early stopping. Neither was the recommended rank classification used. We use a max sequence length of 512. This method simplifies the test setup and gives results that are easy to interpret. However, the results from the T5 model might actually be a bit sub-optimal.
## Sub-versions of North-T5
The following sub-versions are available. More versions will be available shortly.
|**Model** | **Description** |
|:-----------|:-------|
|**North‑T5‑NCC** |This is the main version. It is trained for an additional 500.000 steps from the mT5 checkpoint. The training corpus is based on [the Norwegian Colossal Corpus (NCC)](https://huggingface.co/datasets/NbAiLab/NCC). In addition, data from MC4 and English Wikipedia are added.|
|**North‑T5‑NCC‑lm**|The model is pretrained for an additional 100k steps on the LM objective discussed in the [T5 paper](https://arxiv.org/pdf/1910.10683.pdf). In a way this turns a masked language model into an autoregressive model. It also prepares the model for some tasks. When for instance doing translation and NLI, it is well documented that there is a clear benefit to do a step of unsupervised LM-training before starting the finetuning.|
## Fine-tuned versions
As explained below, the model really needs to be fine-tuned for specific tasks. This procedure is relatively simple, and the models are not very sensitive to the hyper-parameters used. Usually a decent result can be obtained by using a fixed learning rate of 1e-3. Smaller versions of the model typically need to be trained for a longer time. It is easy to train the base-models in a Google Colab.
Since some people really want to see what the models are capable of, without going through the training procedure, I provide a couple of test models. These models are by no means optimised, and are just for demonstrating how the North-T5 models can be used.
* Nynorsk Translator. Translates any text from Norwegian Bokmål to Norwegian Nynorsk. Please test the [Streamlit-demo](https://huggingface.co/spaces/north/Nynorsk) and the [HuggingFace repo](https://huggingface.co/north/demo-nynorsk-base)
* DeUnCaser. The model adds punctuation, spaces and capitalisation back into the text. The input needs to be in Norwegian but does not have to be divided into sentences or have proper capitalisation of words. You can even remove the spaces from the text, and make the model reconstruct it. It can be tested with the [Streamlit-demo](https://huggingface.co/spaces/north/DeUnCaser) and directly on the [HuggingFace repo](https://huggingface.co/north/demo-deuncaser-base)
## Training details
All models are built using the Flax-based T5X codebase, and all models are initiated with the mT5 pretrained weights. The models are trained using the T5.1.1 training regime, where they are only trained on an unsupervised masking-task. This also means that the models (contrary to the original T5) need to be finetuned to solve specific tasks. This finetuning is however usually not very compute intensive, and in most cases it can be performed even with free online training resources.
All the main model versions are trained for 500.000 steps after the mT5 checkpoint (1.000.000 steps). They are trained mainly on a 75GB corpus, consisting of NCC, Common Crawl and some additional high quality English text (Wikipedia). The corpus is roughly 80% Norwegian text. Additional languages are added to retain some of the multilingual capabilities, making the model both more robust to new words/concepts and also more suited as a basis for translation tasks.
While the huge models almost always will give the best results, they are also both more difficult and more expensive to finetune. I strongly recommend starting by finetuning a base-model. The base-models can easily be finetuned on a standard graphics card or a free TPU through Google Colab.
All models were trained on TPUs. The largest XXL model was trained on a TPU v4-64, the XL model on a TPU v4-32, the Large model on a TPU v4-16 and the rest on TPU v4-8. Since it is possible to reduce the batch size during fine-tuning, it is also possible to finetune on slightly smaller hardware. The rule of thumb is that you can go "one step down" when finetuning. The large models still require access to significant hardware, even for finetuning.
## Formats
All models are trained using the Flax-based T5X library. The original checkpoints are available in T5X format and can be used for both finetuning and inference. All models, except the XXL-model, are also converted to Transformers/HuggingFace. In this framework, the models can be loaded for finetuning or inference both in Flax, PyTorch and TensorFlow format.
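For example, the converted HuggingFace checkpoint can be loaded like any other T5 model (a sketch; the masked input mirrors the widget examples above and the generation settings are illustrative):
```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("north/t5_large_NCC_lm")
model = T5ForConditionalGeneration.from_pretrained("north/t5_large_NCC_lm")

# Span-corruption style input, as in the widget examples for this model card.
text = "På <extra_id_0> kan man <extra_id_1> en bok, og man kan også <extra_id_2> seg ned og lese den."
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
```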
## Future
I will continue to train and release additional models to this set. Which models are added will depend on feedback from the users.
## Thanks
This release would not have been possible without getting support and hardware from the [TPU Research Cloud](https://sites.research.google/trc/about/) at Google Research. Both the TPU Research Cloud Team and the T5X Team have provided extremely useful support for getting this running.
Freddy Wetjen at the National Library of Norway has been of tremendous help in generating the original NCC corpus, and has also contributed to generating the collated corpus used for this training. In addition, he has been a discussion partner in the creation of these models.
Also thanks to Stefan Schweter for writing the [script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py) for converting these models from T5X to HuggingFace and to Javier de la Rosa for writing the dataloader for reading the HuggingFace Datasets in T5X.
## Warranty
Use at your own risk. The models have not yet been thoroughly tested, and may contain both errors and biases.
## Contact/About
These models were trained by Per E Kummervold. Please contact me on [email protected].
|
strickvl/nlp-redaction-classifier | 51404dc84a73bbb56304c736ae4b16458bfd0317 | 2022-05-21T20:16:25.000Z | [
"pytorch",
"deberta-v2",
"text-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index"
]
| text-classification | false | strickvl | null | strickvl/nlp-redaction-classifier | 8 | 2 | transformers | 13,455 | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: nlp-redaction-classifier
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Redaction Classifier: NLP Edition
This model is a fine-tuned version of [microsoft/deberta-v3-small](https://huggingface.co/microsoft/deberta-v3-small) on a custom dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0893
- Pearson: 0.8273
## Model description
Read more about the process and the code used to train this model on my blog [here](https://mlops.systems).
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 6
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Pearson |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.2054 | 1.0 | 729 | 0.1382 | 0.6771 |
| 0.1386 | 2.0 | 1458 | 0.1099 | 0.7721 |
| 0.0782 | 3.0 | 2187 | 0.0950 | 0.8083 |
| 0.054 | 4.0 | 2916 | 0.0945 | 0.8185 |
| 0.0319 | 5.0 | 3645 | 0.0880 | 0.8251 |
| 0.0254 | 6.0 | 4374 | 0.0893 | 0.8273 |
### Framework versions
- Transformers 4.19.2
- Pytorch 1.11.0a0+17540c5
- Datasets 2.2.2
- Tokenizers 0.12.1
|
connectivity/cola_6ep_ft-41 | c59c3f1085fb91be5d54aa60da5ba806c99accad | 2022-05-21T16:43:56.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | connectivity | null | connectivity/cola_6ep_ft-41 | 8 | null | transformers | 13,456 | Entry not found |
connectivity/cola_6ep_ft-42 | e9d75d16782e401e408895412e35ae07fbfc3345 | 2022-05-21T16:43:56.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | connectivity | null | connectivity/cola_6ep_ft-42 | 8 | null | transformers | 13,457 | Entry not found |
connectivity/cola_6ep_ft-44 | 1fca9eb464067b5713b9740f573eb7a23fd677d0 | 2022-05-21T16:43:57.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | connectivity | null | connectivity/cola_6ep_ft-44 | 8 | null | transformers | 13,458 | Entry not found |
connectivity/cola_6ep_ft-46 | 2c713671c5efa4533f535ef8f38387593087a5df | 2022-05-21T16:43:58.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | connectivity | null | connectivity/cola_6ep_ft-46 | 8 | null | transformers | 13,459 | Entry not found |
SamuelMiller/sum_sum | f4482e3f155c7f6e0171d780f9a81d6136445497 | 2022-05-22T08:16:23.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
]
| text2text-generation | false | SamuelMiller | null | SamuelMiller/sum_sum | 8 | null | transformers | 13,460 | Entry not found |
Splend1dchan/wav2vec2-large-lv60_t5lephone-small_nofreeze_bs64 | 0819314173fb4963e7a764e952d65a554c842389 | 2022-05-27T13:53:27.000Z | [
"pytorch",
"speechmix",
"transformers"
]
| null | false | Splend1dchan | null | Splend1dchan/wav2vec2-large-lv60_t5lephone-small_nofreeze_bs64 | 8 | null | transformers | 13,461 | Entry not found |
reannayang/segformer-b0-pavement | 2c3ba1fb7cfca4fd0104e814c11e7ff940d63c7f | 2022-05-23T13:29:00.000Z | [
"pytorch",
"tensorboard",
"segformer",
"transformers",
"vision",
"image-segmentation",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
]
| image-segmentation | false | reannayang | null | reannayang/segformer-b0-pavement | 8 | null | transformers | 13,462 | ---
license: apache-2.0
tags:
- vision
- image-segmentation
- generated_from_trainer
model-index:
- name: segformer-b0-pavement
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# segformer-b0-pavement
This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the reannayang/FL_pavement dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4165
- Mean Iou: 0.6318
- Mean Accuracy: 0.9700
- Overall Accuracy: 0.9738
- Per Category Iou: [0.0, 0.964166382973358, 0.9809231860559384, 0.0, 0.9295139919583345, 0.9164463823409184]
- Per Category Accuracy: [nan, 0.9643001261034048, 0.9983497924348297, nan, 0.995031342981772, 0.9223532638507954]
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 6e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
### Training results
| Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Per Category Iou | Per Category Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------:|
| 1.0651 | 10.0 | 20 | 1.3005 | 0.5967 | 0.9512 | 0.9534 | [0.0, 0.9462421185372005, 0.9681701711239586, 0.0, 0.7994398965962947, 0.8662896799897185] | [nan, 0.9462421185372005, 0.9693809143181291, nan, 0.9648149753011526, 0.9243828853538124] |
| 0.5732 | 20.0 | 40 | 0.6626 | 0.6287 | 0.9702 | 0.9760 | [0.0, 0.975246652572234, 0.985446932366533, 0.0, 0.9010974339804011, 0.9103918683964157] | [nan, 0.9772635561160151, 0.9952040842637238, nan, 0.9748678395008233, 0.9334887547997806] |
| 0.6987 | 30.0 | 60 | 0.4319 | 0.6317 | 0.9705 | 0.9758 | [0.0, 0.9709705045212967, 0.9798115236227942, 0.0, 0.9255918522130127, 0.9139245313729214] | [nan, 0.9722194199243379, 0.9986205296134905, nan, 0.9871161568015715, 0.924026330224904] |
| 0.6915 | 40.0 | 80 | 0.4382 | 0.6237 | 0.9634 | 0.9692 | [0.0, 0.9611727616645649, 0.9725125142706595, 0.0, 0.9147983251179308, 0.8937433316006894] | [nan, 0.9611727616645649, 0.9993811721630611, nan, 0.9971690210012422, 0.896023038946791] |
| 0.4373 | 50.0 | 100 | 0.4165 | 0.6318 | 0.9700 | 0.9738 | [0.0, 0.964166382973358, 0.9809231860559384, 0.0, 0.9295139919583345, 0.9164463823409184] | [nan, 0.9643001261034048, 0.9983497924348297, nan, 0.995031342981772, 0.9223532638507954] |
### Framework versions
- Transformers 4.19.2
- Pytorch 1.7.1
- Datasets 2.2.1
- Tokenizers 0.12.1
|
arize-ai/distilbert_reviews_with_context_drift | 2142a949472af600e1961a6272b7f2a78a7a7d55 | 2022-05-24T06:43:55.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"dataset:reviews_with_drift",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
]
| text-classification | false | arize-ai | null | arize-ai/distilbert_reviews_with_context_drift | 8 | 2 | transformers | 13,463 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- reviews_with_drift
metrics:
- accuracy
- f1
model-index:
- name: distilbert_finetuned_reviews_with_drift
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: reviews_with_drift
type: reviews_with_drift
args: default
metrics:
- name: Accuracy
type: accuracy
value: 0.854780153287616
- name: F1
type: f1
value: 0.8547073010596418
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_finetuned_reviews_with_drift
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the reviews_with_drift dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3822
- Accuracy: 0.8548
- F1: 0.8547
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.4173 | 1.0 | 620 | 0.3519 | 0.8511 | 0.8511 |
| 0.259 | 2.0 | 1240 | 0.3822 | 0.8548 | 0.8547 |
### Framework versions
- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
|
joebobby/finetuning-sentiment-model-5000-samples | ea0a93b92f8ad36836b650d59bb8fbd00f90b546 | 2022-05-26T06:08:39.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
]
| text-classification | false | joebobby | null | joebobby/finetuning-sentiment-model-5000-samples | 8 | null | transformers | 13,464 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: finetuning-sentiment-model-5000-samples
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetuning-sentiment-model-5000-samples
This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0701
- Accuracy: 0.758
- F1: 0.7580
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| No log | 1.0 | 313 | 1.0216 | 0.744 | 0.744 |
| 0.2263 | 2.0 | 626 | 1.0701 | 0.758 | 0.7580 |
| 0.2263 | 3.0 | 939 | 1.3097 | 0.723 | 0.723 |
| 0.1273 | 4.0 | 1252 | 1.4377 | 0.743 | 0.743 |
| 0.051 | 5.0 | 1565 | 1.4884 | 0.739 | 0.739 |
### Framework versions
- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
|
jakka/segformer-b0-finetuned-segments-sidewalk-4 | 24f1befe20320fcbbc46eb59fd99b25d2598c5e7 | 2022-05-30T11:56:11.000Z | [
"pytorch",
"segformer",
"transformers",
"vision",
"image-segmentation",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
]
| image-segmentation | false | jakka | null | jakka/segformer-b0-finetuned-segments-sidewalk-4 | 8 | null | transformers | 13,465 | ---
license: apache-2.0
tags:
- vision
- image-segmentation
- generated_from_trainer
model-index:
- name: segformer-b0-finetuned-segments-sidewalk-4
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# segformer-b0-finetuned-segments-sidewalk-4
This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the segments/sidewalk-semantic dataset.
It achieves the following results on the evaluation set:
- Loss: 1.6258
- Mean Iou: 0.1481
- Mean Accuracy: 0.1991
- Overall Accuracy: 0.7316
- Per Category Iou: [nan, 0.4971884694242825, 0.7844619900838784, 0.0, 0.10165655377640956, 0.007428563507709108, nan, 4.566798099115959e-06, 0.0, 0.0, 0.5570746278221521, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.534278997386317, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.7557693923373933, 0.5270379031768208, 0.8254522211471568, 0.0, 0.0, 0.0, 0.0]
- Per Category Accuracy: [nan, 0.8698779680369205, 0.9122325676343133, 0.0, 0.10179229832932858, 0.007508413919135004, nan, 4.566798099115959e-06, 0.0, 0.0, 0.8968168359562617, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.8492049383357001, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.9388033874781816, 0.6627890453030717, 0.9334458854084583, 0.0, 0.0, 0.0, 0.0]
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 6e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Per Category Iou | Per Category Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
| 1.7912 | 1.0 | 25 | 1.6392 | 0.1412 | 0.1911 | 0.7210 | [nan, 0.48942576059104514, 0.7754689525048201, 0.0, 0.031932013148008094, 0.004348266117522573, nan, 1.5527099355168697e-05, 0.0, 0.0, 0.5356571432088642, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.5243044552616699, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.7355207837531991, 0.4479559177066271, 0.8315839315332364, 0.0, 0.0, 0.0, 0.0] | [nan, 0.8476069713517648, 0.9129050708992534, 0.0, 0.03194435645315849, 0.004370669306327572, nan, 1.552711353699426e-05, 0.0, 0.0, 0.897824434787493, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.8555478632753987, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.9510113270409175, 0.5116786406550935, 0.9122706949370997, 0.0, 0.0, 0.0, 0.0] |
| 1.7531 | 2.0 | 50 | 1.6258 | 0.1481 | 0.1991 | 0.7316 | [nan, 0.4971884694242825, 0.7844619900838784, 0.0, 0.10165655377640956, 0.007428563507709108, nan, 4.566798099115959e-06, 0.0, 0.0, 0.5570746278221521, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.534278997386317, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.7557693923373933, 0.5270379031768208, 0.8254522211471568, 0.0, 0.0, 0.0, 0.0] | [nan, 0.8698779680369205, 0.9122325676343133, 0.0, 0.10179229832932858, 0.007508413919135004, nan, 4.566798099115959e-06, 0.0, 0.0, 0.8968168359562617, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.8492049383357001, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.9388033874781816, 0.6627890453030717, 0.9334458854084583, 0.0, 0.0, 0.0, 0.0] |
### Framework versions
- Transformers 4.19.2
- Pytorch 1.11.0
- Datasets 2.2.2
- Tokenizers 0.12.1
|
Santarabantoosoo/PathologyBERT-meningioma | cdd3c943f4016240d827844629ae3c7aa1a75017 | 2022-05-31T11:50:13.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers",
"generated_from_trainer",
"model-index"
]
| text-classification | false | Santarabantoosoo | null | Santarabantoosoo/PathologyBERT-meningioma | 8 | null | transformers | 13,466 | ---
tags:
- generated_from_trainer
metrics:
- accuracy
- precision
- recall
- f1
model-index:
- name: PathologyBERT-meningioma
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# PathologyBERT-meningioma
This model is a fine-tuned version of [tsantos/PathologyBERT](https://huggingface.co/tsantos/PathologyBERT) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8123
- Accuracy: 0.8783
- Precision: 0.25
- Recall: 0.0833
- F1: 0.125
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 0
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
| 0.3723 | 1.0 | 71 | 0.5377 | 0.7652 | 0.0588 | 0.0833 | 0.0690 |
| 0.3363 | 2.0 | 142 | 0.4191 | 0.8783 | 0.25 | 0.0833 | 0.125 |
| 0.2773 | 3.0 | 213 | 0.4701 | 0.8870 | 0.3333 | 0.0833 | 0.1333 |
| 0.2303 | 4.0 | 284 | 0.5831 | 0.8957 | 0.5 | 0.0833 | 0.1429 |
| 0.1657 | 5.0 | 355 | 0.7083 | 0.8348 | 0.1111 | 0.0833 | 0.0952 |
| 0.1228 | 6.0 | 426 | 1.0324 | 0.8 | 0.0769 | 0.0833 | 0.08 |
| 0.0967 | 7.0 | 497 | 0.8103 | 0.8696 | 0.2 | 0.0833 | 0.1176 |
| 0.0729 | 8.0 | 568 | 0.8711 | 0.8696 | 0.2 | 0.0833 | 0.1176 |
| 0.0624 | 9.0 | 639 | 0.7968 | 0.8783 | 0.25 | 0.0833 | 0.125 |
| 0.0534 | 10.0 | 710 | 0.8123 | 0.8783 | 0.25 | 0.0833 | 0.125 |
### Framework versions
- Transformers 4.12.2
- Pytorch 1.10.1
- Datasets 1.15.0
- Tokenizers 0.10.3
|
GiordanoB/mT5_multilingual_XLSum-sumarizacao-PTBR | 003c360e23db41266566263916efb982defd4c44 | 2022-06-01T13:10:06.000Z | [
"pytorch",
"tensorboard",
"mt5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
]
| text2text-generation | false | GiordanoB | null | GiordanoB/mT5_multilingual_XLSum-sumarizacao-PTBR | 8 | null | transformers | 13,467 | ---
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: mT5_multilingual_XLSum-sumarizacao-PTBR
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mT5_multilingual_XLSum-sumarizacao-PTBR
This model is a fine-tuned version of [csebuetnlp/mT5_multilingual_XLSum](https://huggingface.co/csebuetnlp/mT5_multilingual_XLSum) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.3870
- Rouge1: 42.0195
- Rouge2: 24.9493
- Rougel: 32.3653
- Rougelsum: 37.9982
- Gen Len: 77.0
## Let's see the model in action!
```python
import re

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Collapse newlines and repeated whitespace before tokenisation.
WHITESPACE_HANDLER = lambda k: re.sub(r'\s+', ' ', re.sub(r'\n+', ' ', k.strip()))

model_name = "GiordanoB/mT5_multilingual_XLSum-sumarizacao-PTBR"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# sumariosDuplos is assumed to be a list of input documents to summarise;
# sumariosFinal collects the generated summaries.
sumariosFinal = []
for i in range(len(sumariosDuplos)):
    input_ids = tokenizer(
        [WHITESPACE_HANDLER(sumariosDuplos[i])],
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=512
    )["input_ids"]

    output_ids = model.generate(
        input_ids=input_ids,
        max_length=200,
        min_length=75,
        no_repeat_ngram_size=2,
        num_beams=5
    )[0]

    summary = tokenizer.decode(
        output_ids,
        skip_special_tokens=True,
        clean_up_tokenization_spaces=False
    )

    sumariosFinal.append(summary)
    print(i, "\n", summary, "\n")
```
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
| No log | 1.0 | 15 | 1.5687 | 32.2316 | 18.9289 | 23.918 | 27.7216 | 51.5714 |
| No log | 2.0 | 30 | 1.4530 | 41.2297 | 26.1883 | 30.8012 | 37.1727 | 69.5714 |
| No log | 3.0 | 45 | 1.4043 | 40.8986 | 24.4993 | 31.349 | 36.8782 | 72.2143 |
| No log | 4.0 | 60 | 1.3908 | 42.1019 | 25.5555 | 32.9018 | 38.0202 | 74.5 |
| No log | 5.0 | 75 | 1.3870 | 42.0195 | 24.9493 | 32.3653 | 37.9982 | 77.0 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
|
cjbarrie/masress-medcrit-camel | 3f1d10c1652f4a8c612bf777826636c44f8039ac | 2022-06-01T13:23:54.000Z | [
"pytorch",
"bert",
"text-classification",
"unk",
"dataset:cjbarrie/autotrain-data-masress-medcrit-binary-5",
"transformers",
"autotrain",
"co2_eq_emissions"
]
| text-classification | false | cjbarrie | null | cjbarrie/masress-medcrit-camel | 8 | null | transformers | 13,468 | ---
tags: autotrain
language: unk
widget:
- text: "الكل ينتقد الرئيس على إخفاقاته"
datasets:
- cjbarrie/autotrain-data-masress-medcrit-binary-5
co2_eq_emissions: 0.01017487638098474
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 937130980
- CO2 Emissions (in grams): 0.01017487638098474
## Validation Metrics
- Loss: 0.757265031337738
- Accuracy: 0.7551020408163265
- Macro F1: 0.7202470830473576
- Micro F1: 0.7551020408163265
- Weighted F1: 0.7594301962377263
- Macro Precision: 0.718716577540107
- Micro Precision: 0.7551020408163265
- Weighted Precision: 0.7711448215649895
- Macro Recall: 0.7285714285714286
- Micro Recall: 0.7551020408163265
- Weighted Recall: 0.7551020408163265
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/cjbarrie/autotrain-masress-medcrit-binary-5-937130980
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("cjbarrie/autotrain-masress-medcrit-binary-5-937130980", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("cjbarrie/autotrain-masress-medcrit-binary-5-937130980", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
``` |
chrisvinsen/wav2vec2-final-1-lm-4 | f92173c8dc4d1631bc4f66f53a7bb0c8292caadb | 2022-06-02T12:03:09.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
]
| automatic-speech-recognition | false | chrisvinsen | null | chrisvinsen/wav2vec2-final-1-lm-4 | 8 | null | transformers | 13,469 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-19
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-19
- WER: 0.283
- WER: 0.126 with a 5-gram language model
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6305
- Wer: 0.4499
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 800
- num_epochs: 60
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 3.4816 | 2.74 | 400 | 1.0717 | 0.8927 |
| 0.751 | 5.48 | 800 | 0.7155 | 0.7533 |
| 0.517 | 8.22 | 1200 | 0.7039 | 0.6675 |
| 0.3988 | 10.96 | 1600 | 0.5935 | 0.6149 |
| 0.3179 | 13.7 | 2000 | 0.6477 | 0.5999 |
| 0.2755 | 16.44 | 2400 | 0.5549 | 0.5798 |
| 0.2343 | 19.18 | 2800 | 0.6626 | 0.5798 |
| 0.2103 | 21.92 | 3200 | 0.6488 | 0.5674 |
| 0.1877 | 24.66 | 3600 | 0.5874 | 0.5339 |
| 0.1719 | 27.4 | 4000 | 0.6354 | 0.5389 |
| 0.1603 | 30.14 | 4400 | 0.6612 | 0.5210 |
| 0.1401 | 32.88 | 4800 | 0.6676 | 0.5131 |
| 0.1286 | 35.62 | 5200 | 0.6366 | 0.5075 |
| 0.1159 | 38.36 | 5600 | 0.6064 | 0.4977 |
| 0.1084 | 41.1 | 6000 | 0.6530 | 0.4835 |
| 0.0974 | 43.84 | 6400 | 0.6118 | 0.4853 |
| 0.0879 | 46.58 | 6800 | 0.6316 | 0.4770 |
| 0.0815 | 49.32 | 7200 | 0.6125 | 0.4664 |
| 0.0708 | 52.05 | 7600 | 0.6449 | 0.4683 |
| 0.0651 | 54.79 | 8000 | 0.6068 | 0.4571 |
| 0.0555 | 57.53 | 8400 | 0.6305 | 0.4499 |
### Framework versions
- Transformers 4.19.2
- Pytorch 1.11.0+cu113
- Datasets 2.2.2
- Tokenizers 0.12.1
|
wapari/KoGPT-trinity-tales | 1614160ff9b3aa2771efd4b05a6da50ac3ae2cb1 | 2022-06-02T03:43:33.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"license:cc-by-nc-sa-4.0"
]
| text-generation | false | wapari | null | wapari/KoGPT-trinity-tales | 8 | null | transformers | 13,470 | ---
license: cc-by-nc-sa-4.0
---
|
yannis95/bert-finetuned-ner | 7b10d9ce870b3b07e373c1590465cc2f463a26ef | 2022-06-02T12:35:12.000Z | [
"pytorch",
"tensorboard",
"bert",
"token-classification",
"dataset:conll2003",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
]
| token-classification | false | yannis95 | null | yannis95/bert-finetuned-ner | 8 | null | transformers | 13,471 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-finetuned-ner
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: conll2003
type: conll2003
args: conll2003
metrics:
- name: Precision
type: precision
value: 0.926145730300033
- name: Recall
type: recall
value: 0.9454729047458769
- name: F1
type: f1
value: 0.935709526982012
- name: Accuracy
type: accuracy
value: 0.9851209748631307
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-finetuned-ner
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0665
- Precision: 0.9261
- Recall: 0.9455
- F1: 0.9357
- Accuracy: 0.9851
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0852 | 1.0 | 1756 | 0.0650 | 0.9197 | 0.9367 | 0.9281 | 0.9830 |
| 0.0407 | 2.0 | 3512 | 0.0621 | 0.9225 | 0.9438 | 0.9330 | 0.9848 |
| 0.0195 | 3.0 | 5268 | 0.0665 | 0.9261 | 0.9455 | 0.9357 | 0.9851 |
### Framework versions
- Transformers 4.19.1
- Pytorch 1.11.0
- Datasets 2.2.2
- Tokenizers 0.12.1
|
Jeevesh8/init_bert_ft_qqp-15 | eef52e925b9ff308fc3724a434031a7173eb5ccf | 2022-06-02T12:41:48.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-15 | 8 | null | transformers | 13,472 | Entry not found |
Jeevesh8/init_bert_ft_qqp-19 | d50f7ad36c5a7cbde6dcd2cbc2be6d9bce79c50d | 2022-06-02T12:39:47.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-19 | 8 | null | transformers | 13,473 | Entry not found |
Jeevesh8/init_bert_ft_qqp-28 | 9c92254d2169f0891a1ad84e1591ba3514441879 | 2022-06-02T12:39:37.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-28 | 8 | null | transformers | 13,474 | Entry not found |
Jeevesh8/init_bert_ft_qqp-45 | 6b76f6d5be167534036292832aed81b2cd4d3ed2 | 2022-06-02T12:39:28.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-45 | 8 | null | transformers | 13,475 | Entry not found |
Jeevesh8/init_bert_ft_qqp-33 | 4d9ec5fcb522a8f0d4ac92000ae6fdcfc901a11b | 2022-06-02T12:39:52.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-33 | 8 | null | transformers | 13,476 | Entry not found |
Jeevesh8/init_bert_ft_qqp-43 | b470333827dd3d6cd6a44cace02055fde124ff23 | 2022-06-02T12:39:51.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-43 | 8 | null | transformers | 13,477 | Entry not found |
Jeevesh8/init_bert_ft_qqp-61 | 097ff802b2eca98d5480baaa6ff875b2f9936cae | 2022-06-02T12:41:41.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-61 | 8 | null | transformers | 13,478 | Entry not found |
Jeevesh8/init_bert_ft_qqp-62 | 0b3813b3111561aa01789c44d38c3232f1e314dd | 2022-06-02T12:42:29.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-62 | 8 | null | transformers | 13,479 | Entry not found |
Jeevesh8/init_bert_ft_qqp-47 | 8513a6f7da0321943894907cf82c223ebbfa8041 | 2022-06-02T12:39:28.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-47 | 8 | null | transformers | 13,480 | Entry not found |
Jeevesh8/init_bert_ft_qqp-49 | 48cb2bd4fb65235e1886fcf29a1b8072622ee38b | 2022-06-02T12:39:43.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-49 | 8 | null | transformers | 13,481 | Entry not found |
Jeevesh8/init_bert_ft_qqp-46 | 0f972097180411882f39ec7cae1f58f113d4658e | 2022-06-02T12:39:27.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-46 | 8 | null | transformers | 13,482 | Entry not found |
Jeevesh8/init_bert_ft_qqp-42 | 144e46ed31d26f708efba9e50dc96eed438f5235 | 2022-06-02T12:39:27.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-42 | 8 | null | transformers | 13,483 | Entry not found |
Jeevesh8/init_bert_ft_qqp-41 | df643f37101099922472348771d2195c5bac1214 | 2022-06-02T12:39:30.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-41 | 8 | null | transformers | 13,484 | Entry not found |
Jeevesh8/init_bert_ft_qqp-39 | 720e666c9677b38769ff418793d07e593be45915 | 2022-06-02T12:41:29.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-39 | 8 | null | transformers | 13,485 | Entry not found |
Jeevesh8/init_bert_ft_qqp-50 | f0988c12ca0b4940c058fa560bcd0038b83c45f8 | 2022-06-02T12:39:35.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-50 | 8 | null | transformers | 13,486 | Entry not found |
Jeevesh8/init_bert_ft_qqp-64 | 2c0845c05d7c23bd9148e3e0c2bdfa23e21c11e0 | 2022-06-02T12:40:47.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-64 | 8 | null | transformers | 13,487 | Entry not found |
Jeevesh8/init_bert_ft_qqp-65 | 4deacd760df39605be27e3067051b696a81f0b01 | 2022-06-02T12:40:00.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-65 | 8 | null | transformers | 13,488 | Entry not found |
Jeevesh8/init_bert_ft_qqp-63 | 12832cd3a09d080b44b65a5e03e757bd93db28c0 | 2022-06-02T12:40:48.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-63 | 8 | null | transformers | 13,489 | Entry not found |
Jeevesh8/init_bert_ft_qqp-36 | 490a3538c45e526812d876156703ea6680d71a66 | 2022-06-02T12:40:02.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-36 | 8 | null | transformers | 13,490 | Entry not found |
Jeevesh8/init_bert_ft_qqp-66 | 895f2394d4c3c76994265995566818b43aece788 | 2022-06-02T12:40:08.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-66 | 8 | null | transformers | 13,491 | Entry not found |
Jeevesh8/init_bert_ft_qqp-67 | e98a08248d7912d5c96845f3edb32a5dd55f214e | 2022-06-02T12:40:53.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-67 | 8 | null | transformers | 13,492 | Entry not found |
Jeevesh8/init_bert_ft_qqp-68 | 6d3e3d21d206418dfe82790b3725fcea9d7a72c0 | 2022-06-02T12:40:29.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-68 | 8 | null | transformers | 13,493 | Entry not found |
Jeevesh8/init_bert_ft_qqp-69 | 11b27b2e536206739dc19c848cd2d1daf4869c22 | 2022-06-02T12:40:32.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-69 | 8 | null | transformers | 13,494 | Entry not found |
Jeevesh8/init_bert_ft_qqp-70 | b5550b4ffdb2cc475388065174fed7dba2e2f884 | 2022-06-02T12:40:32.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-70 | 8 | null | transformers | 13,495 | Entry not found |
Jeevesh8/init_bert_ft_qqp-71 | 0efd309a4f4e7b6f27d930edc6fdd32ee4429b6b | 2022-06-02T12:40:32.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-71 | 8 | null | transformers | 13,496 | Entry not found |
Jeevesh8/init_bert_ft_qqp-73 | d0bd03df693004e1e18d9a7bb90b9561f03cf77f | 2022-06-02T12:40:39.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-73 | 8 | null | transformers | 13,497 | Entry not found |
Jeevesh8/init_bert_ft_qqp-72 | c2463c4aaeafd9947ef1e9503f7135e5a1294c40 | 2022-06-02T12:40:35.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-72 | 8 | null | transformers | 13,498 | Entry not found |
Jeevesh8/init_bert_ft_qqp-74 | afd13879fc17345e3921634d04545ddbc3a68617 | 2022-06-02T12:45:11.000Z | [
"pytorch",
"tensorboard",
"bert",
"text-classification",
"transformers"
]
| text-classification | false | Jeevesh8 | null | Jeevesh8/init_bert_ft_qqp-74 | 8 | null | transformers | 13,499 | Entry not found |