modelId | sha | lastModified | tags | pipeline_tag | private | author | config | id | downloads | likes | library_name | __index_level_0__ | readme |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
diegozs97/sciie-seed-3-200k | 862ec5f180898d7637392acf2ee1b403b6328069 | 2021-12-07T19:05:13.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-3-200k | 1 | null | transformers | 28,900 | Entry not found |
diegozs97/sciie-seed-3-20k | c8133a745c9f248f01398d579bfb89fe7b0d247a | 2021-12-07T15:35:44.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-3-20k | 1 | null | transformers | 28,901 | Entry not found |
diegozs97/sciie-seed-3-400k | 9193a7b6355aa5c64c327576ae153a8bedbf73a8 | 2021-12-07T15:51:52.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-3-400k | 1 | null | transformers | 28,902 | Entry not found |
diegozs97/sciie-seed-3-60k | 377bb21400a2f5d8036a92e74d5ae5e8d2d92dde | 2021-12-07T15:44:44.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-3-60k | 1 | null | transformers | 28,903 | Entry not found |
diegozs97/sciie-seed-3-700k | 039724cf78e4660897691c42712faa9ba79f736f | 2021-12-07T16:00:41.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-3-700k | 1 | null | transformers | 28,904 | Entry not found |
diegozs97/sciie-seed-4-2000k | 974d46c22d0edb3d29feba95adc25091d4a5ede3 | 2021-12-07T22:34:52.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-4-2000k | 1 | null | transformers | 28,905 | Entry not found |
diegozs97/sciie-seed-4-400k | fb5321040cbd81b21dff5bba3514673683c50557 | 2021-12-07T21:06:14.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-4-400k | 1 | null | transformers | 28,906 | Entry not found |
diegozs97/sciie-seed-4-60k | e115a78e517c5a58f24eecd1746ee27a7d061a77 | 2021-12-07T20:51:42.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-4-60k | 1 | null | transformers | 28,907 | Entry not found |
diegozs97/sciie-seed-4-700k | 2f4276e7225b3f07fead48367db4eadb88a39203 | 2021-12-07T21:11:05.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/sciie-seed-4-700k | 1 | null | transformers | 28,908 | Entry not found |
diegozs97/test_model | 08153bcbc84a29da8e0d819e15b8a9cef68e7172 | 2021-12-06T23:07:46.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | diegozs97 | null | diegozs97/test_model | 1 | null | transformers | 28,909 | Entry not found |
disdamoe/TheGreatManipulator | 314922e73cce2c13349eb937a273082bd40dac5f | 2021-12-12T18:58:42.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | disdamoe | null | disdamoe/TheGreatManipulator | 1 | null | transformers | 28,910 | ---
tags:
- conversational
---
# Moe DialoGPT Model |
dk16gaming/DialoGPT-small-HarryPotter | d88bef4c9a4a14addec8e414996ff126705381d5 | 2021-09-21T01:57:24.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | dk16gaming | null | dk16gaming/DialoGPT-small-HarryPotter | 1 | null | transformers | 28,911 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
dobbytk/KSL-BERT | 10530f5b646f4b632709b0eeedbcb1f4a24d9582 | 2021-10-20T13:19:07.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | dobbytk | null | dobbytk/KSL-BERT | 1 | null | transformers | 28,912 | Entry not found |
docketanalyzer/distilroberta-base-ddlm | 0610142f18a6b7c605098f02fb101bc854344dc1 | 2021-05-20T16:12:56.000Z | [
"pytorch",
"jax",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | docketanalyzer | null | docketanalyzer/distilroberta-base-ddlm | 1 | null | transformers | 28,913 | Entry not found |
donggyu/mnmt | efeee89d6f28e99e08a3ce5252ca3e620643ec67 | 2021-11-30T05:27:14.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | donggyu | null | donggyu/mnmt | 1 | null | transformers | 28,914 | Entry not found |
donhuang/game_roberta_finetuned_base_wwm | bfe2b554742c0bd9d15cea32a93b26ac06c946ce | 2021-07-19T06:42:36.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | donhuang | null | donhuang/game_roberta_finetuned_base_wwm | 1 | null | transformers | 28,915 | Entry not found |
dpasch01/finetune-clm-employment | 4211876bddd72b1879c15b9f331187a40fc84c1e | 2021-12-22T07:59:51.000Z | [
"pytorch",
"tensorboard",
"roberta",
"fill-mask",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | fill-mask | false | dpasch01 | null | dpasch01/finetune-clm-employment | 1 | null | transformers | 28,916 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: finetune-clm-employment
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetune-clm-employment
This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.8445
## Model description
More information needed
## Intended uses & limitations
More information needed
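The original card leaves this section blank. As an illustration only (not from the model author), a fill-mask checkpoint like this one can typically be queried with the standard `transformers` pipeline; the example sentence below is made up:
```python
from transformers import pipeline

# Hypothetical usage sketch: query the fine-tuned masked language model.
# distilroberta-base uses "<mask>" as its mask token.
fill_mask = pipeline("fill-mask", model="dpasch01/finetune-clm-employment")
print(fill_mask("The company plans to <mask> more employees next year."))
```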
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.3283 | 1.0 | 3989 | 1.9578 |
| 2.0824 | 2.0 | 7978 | 1.9013 |
| 1.9936 | 3.0 | 11967 | 1.8625 |
### Framework versions
- Transformers 4.14.1
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
|
dragonStyle/bert-303-step35000 | 88bb30af334e97e972b3d4fb84244c1d209b9831 | 2021-06-21T03:01:59.000Z | [
"pytorch",
"bert",
"transformers"
] | null | false | dragonStyle | null | dragonStyle/bert-303-step35000 | 1 | null | transformers | 28,917 | This is a git lfs project.
Model performance before the data was transformed:
knowledge points - max length is 1566, min length is 3, ave length is 87.96, 95% quantile is 490.
question and answer - max length is 303, min length is 8, ave length is 47.09, 95% quantile is 119.
303 accuracy: 2562/5232 = 48.97%
|
dreamline2/DialoGPT-small-joshua-demo | 4813c7676a3626f5bd1c85c33ac21422646f7caf | 2022-01-22T07:26:52.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | dreamline2 | null | dreamline2/DialoGPT-small-joshua-demo | 1 | null | transformers | 28,918 | ---
tags:
- conversational
---
# My Awesome Model |
dudesparsh/tweet_GPT | 5b86e3ec44f0526128ca23043038ad4c8760f16b | 2021-05-21T15:41:54.000Z | [
"pytorch",
"jax",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | dudesparsh | null | dudesparsh/tweet_GPT | 1 | null | transformers | 28,919 | Entry not found |
dundar/wav2vec2-large-xlsr-53-lithuanian | 8c42544edf5bbdf50fa5417b80b70b45a3c42b4f | 2021-07-06T01:34:27.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"lt",
"dataset:common_voice",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | dundar | null | dundar/wav2vec2-large-xlsr-53-lithuanian | 1 | null | transformers | 28,920 | ---
language: lt
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Lithuanian by Enes Burak Dundar
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice lt
type: common_voice
args: lt
metrics:
- name: Test WER
type: wer
value: 35.87
---
# Wav2Vec2-Large-XLSR-53-Lithuanian
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Lithuanian using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "lt", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("dundar/wav2vec2-large-xlsr-53-lithuanian")
model = Wav2Vec2ForCTC.from_pretrained("dundar/wav2vec2-large-xlsr-53-lithuanian")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Lithuanian test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "lt", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("dundar/wav2vec2-large-xlsr-53-lithuanian")
model = Wav2Vec2ForCTC.from_pretrained("dundar/wav2vec2-large-xlsr-53-lithuanian")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run inference on the test set and decode the predictions
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 35.87 %
## Training
The Common Voice dataset splits, except the test set, were used for training.
The script used for training can be found [here](https://github.com/ebdundar/) |
dundar/wav2vec2-large-xlsr-53-turkish | 9fe6a45fb1a694dcbb603790e18163f1ba197ce4 | 2021-07-06T01:36:42.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"tr",
"dataset:common_voice",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | dundar | null | dundar/wav2vec2-large-xlsr-53-turkish | 1 | 1 | transformers | 28,921 | ---
language: tr
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Turkish by Enes Burak Dundar
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice tr
type: common_voice
args: tr
metrics:
- name: Test WER
type: wer
value: 24.86
---
# Wav2Vec2-Large-XLSR-53-Turkish
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Turkish using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "tr", split="test[:2%]") #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site.
processor = Wav2Vec2Processor.from_pretrained("dundar/wav2vec2-large-xlsr-53-turkish")
model = Wav2Vec2ForCTC.from_pretrained("dundar/wav2vec2-large-xlsr-53-turkish")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Turkish test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "tr", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("dundar/wav2vec2-large-xlsr-53-turkish")
model = Wav2Vec2ForCTC.from_pretrained("dundar/wav2vec2-large-xlsr-53-turkish")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\'\:\"\“\%\‘\”\�]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run inference on the test set and decode the predictions
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 24.86 %
## Training
The Common Voice dataset splits, except the test set, were used for training.
The script used for training can be found [here](https://github.com/ebdundar/) |
eclare/DialoGPT-small-SCHAEFER | 7e12ef9c19125cc80e4163119eaee073990e5012 | 2021-09-19T06:58:36.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | eclare | null | eclare/DialoGPT-small-SCHAEFER | 1 | null | transformers | 28,922 | ---
tags:
- conversational
---
# Predator DialoGPT-small-SCHAEFER model |
ehdwns1516/bart_finetuned_xsum | 11471c06f8334892c48335ddf7f9749fe7342d0a | 2021-07-30T03:49:31.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | ehdwns1516 | null | ehdwns1516/bart_finetuned_xsum | 1 | null | transformers | 28,923 | # ehdwns1516/bart_finetuned_xsum
* This model has been fine-tuned on the [xsum dataset](https://huggingface.co/datasets/xsum).
* Input the text you want to summarize.
Text summarizer DEMO: [Ainize DEMO](https://main-text-summarizer-ehdwns1516.endpoint.ainize.ai/)
Text summarizer API: [Ainize API](https://ainize.web.app/redirect?git_repo=https://github.com/ehdwns1516/text_summarizer)
## Overview
Language model: [facebook/bart-large](https://huggingface.co/facebook/bart-large)
Language: English
Training data: [xsum dataset](https://huggingface.co/datasets/xsum)
Code: See [Ainize Workspace](https://ainize.ai/workspace/create?imageId=hnj95592adzr02xPTqss&git=https://github.com/ehdwns1516/bart_finetuned_xsum-notebook)
## Usage
## In Transformers
```
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
tokenizer = AutoTokenizer.from_pretrained("ehdwns1516/bart_finetuned_xsum")
model = AutoModelForSeq2SeqLM.from_pretrained("ehdwns1516/bart_finetuned_xsum")
summarizer = pipeline(
"summarization",
model="ehdwns1516/bart_finetuned_xsum",
tokenizer=tokenizer
)
context = "your context"
result = dict()
result[0] = summarizer(context)[0]
```
|
ehdwns1516/gpt2_review_star2 | dd83fd185d16cf388b16c2430982ab5625a7d32f | 2021-07-23T01:06:41.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | ehdwns1516 | null | ehdwns1516/gpt2_review_star2 | 1 | null | transformers | 28,924 | # gpt2_review_star2
* This model has been trained on the review_body text of 2-star reviews from the [amazon_review dataset](https://huggingface.co/datasets/amazon_reviews_multi).
* Input the text you want to generate a review from.
* If the context is longer than 1200 characters, the context may be cut in the middle and the result may not come out well.
review generator DEMO: [Ainize DEMO](https://main-review-generator-ehdwns1516.endpoint.ainize.ai/)
review generator API: [Ainize API](https://ainize.web.app/redirect?git_repo=https://github.com/ehdwns1516/review_generator)
## Model links for each 1 to 5 star
* [ehdwns1516/gpt2_review_star1](https://huggingface.co/ehdwns1516/gpt2_review_star1)
* [ehdwns1516/gpt2_review_star2](https://huggingface.co/ehdwns1516/gpt2_review_star2)
* [ehdwns1516/gpt2_review_star3](https://huggingface.co/ehdwns1516/gpt2_review_star3)
* [ehdwns1516/gpt2_review_star4](https://huggingface.co/ehdwns1516/gpt2_review_star4)
* [ehdwns1516/gpt2_review_star5](https://huggingface.co/ehdwns1516/gpt2_review_star5)
## Overview
Language model: [gpt2](https://huggingface.co/gpt2)
Language: English
Training data: review_body dataset with a star of 2 in the [amazon_review dataset](https://huggingface.co/datasets/amazon_reviews_multi).
Code: See [Ainize Workspace](https://ainize.ai/workspace/create?imageId=hnj95592adzr02xPTqss&git=https://github.com/ehdwns1516/gpt2_review_fine-tunning_note)
## Usage
## In Transformers
```
from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline
tokenizer = AutoTokenizer.from_pretrained("ehdwns1516/gpt2_review_star2")
model = AutoModelWithLMHead.from_pretrained("ehdwns1516/gpt2_review_star2")
generator = pipeline(
"text-generation",
model="ehdwns1516/gpt2_review_star2",
tokenizer=tokenizer
)
context = "your context"
result = dict()
result[0] = generator(context)[0]
```
|
ehdwns1516/gpt2_review_star4 | 9db9820a58c20901bd74f43342267b480dd00a60 | 2021-07-23T01:07:26.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | ehdwns1516 | null | ehdwns1516/gpt2_review_star4 | 1 | null | transformers | 28,925 | # gpt2_review_star4
* This model has been trained on the review_body text of 4-star reviews from the [amazon_review dataset](https://huggingface.co/datasets/amazon_reviews_multi).
* Input the text you want to generate a review from.
* If the context is longer than 1200 characters, the context may be cut in the middle and the result may not come out well.
review generator DEMO: [Ainize DEMO](https://main-review-generator-ehdwns1516.endpoint.ainize.ai/)
review generator API: [Ainize API](https://ainize.web.app/redirect?git_repo=https://github.com/ehdwns1516/review_generator)
## Model links for each 1 to 5 star
* [ehdwns1516/gpt2_review_star1](https://huggingface.co/ehdwns1516/gpt2_review_star1)
* [ehdwns1516/gpt2_review_star2](https://huggingface.co/ehdwns1516/gpt2_review_star2)
* [ehdwns1516/gpt2_review_star3](https://huggingface.co/ehdwns1516/gpt2_review_star3)
* [ehdwns1516/gpt2_review_star4](https://huggingface.co/ehdwns1516/gpt2_review_star4)
* [ehdwns1516/gpt2_review_star5](https://huggingface.co/ehdwns1516/gpt2_review_star5)
## Overview
Language model: [gpt2](https://huggingface.co/gpt2)
Language: English
Training data: review_body dataset with a star of 4 in the [amazon_review dataset](https://huggingface.co/datasets/amazon_reviews_multi).
Code: See [Ainize Workspace](https://ainize.ai/workspace/create?imageId=hnj95592adzr02xPTqss&git=https://github.com/ehdwns1516/gpt2_review_fine-tunning_note)
## Usage
## In Transformers
```
from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline
tokenizer = AutoTokenizer.from_pretrained("ehdwns1516/gpt2_review_star4")
model = AutoModelWithLMHead.from_pretrained("ehdwns1516/gpt2_review_star4")
generator = pipeline(
"text-generation",
model="ehdwns1516/gpt2_review_star4",
tokenizer=tokenizer
)
context = "your context"
result = dict()
result[0] = generator(context)[0]
```
|
eklrivera/DialoGPT-small-harrypotter | db65d55c35df6bbcf29ce8d13315dc97117050ef | 2021-08-28T01:51:19.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | eklrivera | null | eklrivera/DialoGPT-small-harrypotter | 1 | null | transformers | 28,926 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
eldor-97/MarianMix_en-10 | 9a49c64b9bbe2c8f852611ed3c0282f8d770c527 | 2022-01-30T23:25:27.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | eldor-97 | null | eldor-97/MarianMix_en-10 | 1 | null | transformers | 28,927 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- bleu
model-index:
- name: MarianMix_en-10
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# MarianMix_en-10
This model is a fine-tuned version of [Helsinki-NLP/opus-tatoeba-en-ja](https://huggingface.co/Helsinki-NLP/opus-tatoeba-en-ja) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0752
- Bleu: 14.601
- Gen Len: 45.8087
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 32
- seed: 99
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 10
- num_epochs: 5
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:--------:|
| 2.1136 | 0.44 | 500 | 2.0044 | 0.2655 | 109.0201 |
| 1.1422 | 0.89 | 1000 | 1.7516 | 1.4123 | 71.0 |
| 0.9666 | 1.33 | 1500 | 1.5219 | 3.6611 | 64.6888 |
| 0.8725 | 1.78 | 2000 | 1.3606 | 4.6539 | 77.1641 |
| 0.7655 | 2.22 | 2500 | 1.2586 | 8.3456 | 60.3837 |
| 0.7149 | 2.67 | 3000 | 1.1953 | 11.2247 | 50.5921 |
| 0.6719 | 3.11 | 3500 | 1.1541 | 10.4303 | 54.3776 |
| 0.6265 | 3.56 | 4000 | 1.1186 | 13.3231 | 48.283 |
| 0.6157 | 4.0 | 4500 | 1.0929 | 13.8467 | 46.569 |
| 0.5736 | 4.44 | 5000 | 1.0848 | 14.2731 | 45.5035 |
| 0.5683 | 4.89 | 5500 | 1.0752 | 14.601 | 45.8087 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.9.1
- Datasets 1.17.0
- Tokenizers 0.10.3
|
elgeish/cs224n-squad2.0-albert-xxlarge-v1 | 503bdb37bcfa9fa4030362563ca38266f1ed52d2 | 2020-12-11T21:39:01.000Z | [
"pytorch",
"albert",
"question-answering",
"arxiv:2004.07067",
"transformers",
"exbert",
"autotrain_compatible"
] | question-answering | false | elgeish | null | elgeish/cs224n-squad2.0-albert-xxlarge-v1 | 1 | null | transformers | 28,928 | ---
tags:
- exbert
---
## CS224n SQuAD2.0 Project Dataset
The goal of this model is to save CS224n students GPU time when establishing
baselines to beat for the [Default Final Project](http://web.stanford.edu/class/cs224n/project/default-final-project-handout.pdf).
The training set used to fine-tune this model is the same as
the [official one](https://rajpurkar.github.io/SQuAD-explorer/); however,
evaluation and model selection were performed using roughly half of the official
dev set, 6078 examples, picked at random. The data files can be found at
<https://github.com/elgeish/squad/tree/master/data> — this is the Winter 2020
version. Given that the official SQuAD2.0 dev set contains the project's test
set, students must make sure not to use the official SQuAD2.0 dev set in any way
— including the use of models fine-tuned on the official SQuAD2.0, since they
used the official SQuAD2.0 dev set for model selection.
<a href="https://huggingface.co/exbert/?model=elgeish/cs224n-squad2.0-albert-xxlarge-v1">
<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>
## Results
```json
{
"exact": 85.93287265547877,
"f1": 88.91258331187983,
"total": 6078,
"HasAns_exact": 84.36426116838489,
"HasAns_f1": 90.58786301361013,
"HasAns_total": 2910,
"NoAns_exact": 87.37373737373737,
"NoAns_f1": 87.37373737373737,
"NoAns_total": 3168,
"best_exact": 85.93287265547877,
"best_exact_thresh": 0.0,
"best_f1": 88.91258331187993,
"best_f1_thresh": 0.0
}
```
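The card does not include an inference example. As an illustration for students establishing baselines, the checkpoint can typically be queried with the standard `transformers` question-answering pipeline; this snippet is not part of the original card and the question/context strings are placeholders:
```python
from transformers import pipeline

# Hypothetical usage sketch; handle_impossible_answer lets the pipeline return an
# empty answer for SQuAD2.0-style unanswerable questions.
qa = pipeline("question-answering", model="elgeish/cs224n-squad2.0-albert-xxlarge-v1")
result = qa(
    question="What was used for model selection?",
    context="Evaluation and model selection were performed using roughly half of the official dev set.",
    handle_impossible_answer=True,
)
print(result)
```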
## Notable Arguments
```json
{
"do_lower_case": true,
"doc_stride": 128,
"fp16": false,
"fp16_opt_level": "O1",
"gradient_accumulation_steps": 24,
"learning_rate": 3e-05,
"max_answer_length": 30,
"max_grad_norm": 1,
"max_query_length": 64,
"max_seq_length": 512,
"model_name_or_path": "albert-xxlarge-v1",
"model_type": "albert",
"num_train_epochs": 4,
"per_gpu_train_batch_size": 1,
"save_steps": 1000,
"seed": 42,
"train_batch_size": 1,
"version_2_with_negative": true,
"warmup_steps": 814,
"weight_decay": 0
}
```
## Environment Setup
```json
{
"transformers": "2.5.1",
"pytorch": "1.4.0=py3.6_cuda10.1.243_cudnn7.6.3_0",
"python": "3.6.5=hc3d631a_2",
"os": "Linux 4.15.0-1060-aws #62-Ubuntu SMP Tue Feb 11 21:23:22 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux",
"gpu": "Tesla V100-SXM2-16GB"
}
```
## How to Cite
```BibTeX
@misc{elgeish2020gestalt,
title={Gestalt: a Stacking Ensemble for SQuAD2.0},
author={Mohamed El-Geish},
journal={arXiv e-prints},
archivePrefix={arXiv},
eprint={2004.07067},
year={2020},
}
```
## Related Models
* [elgeish/cs224n-squad2.0-albert-base-v2](https://huggingface.co/elgeish/cs224n-squad2.0-albert-base-v2)
* [elgeish/cs224n-squad2.0-albert-large-v2](https://huggingface.co/elgeish/cs224n-squad2.0-albert-large-v2)
* [elgeish/cs224n-squad2.0-distilbert-base-uncased](https://huggingface.co/elgeish/cs224n-squad2.0-distilbert-base-uncased)
* [elgeish/cs224n-squad2.0-roberta-base](https://huggingface.co/elgeish/cs224n-squad2.0-roberta-base)
|
eliasbe/IceBERT-finetuned-ner | 26889a1111bab9030c508a95ad31be8493a0f84b | 2021-10-05T12:35:51.000Z | [
"pytorch",
"tensorboard",
"roberta",
"token-classification",
"dataset:mim_gold_ner",
"transformers",
"generated_from_trainer",
"license:gpl-3.0",
"model-index",
"autotrain_compatible"
] | token-classification | false | eliasbe | null | eliasbe/IceBERT-finetuned-ner | 1 | null | transformers | 28,929 | ---
license: gpl-3.0
tags:
- generated_from_trainer
datasets:
- mim_gold_ner
model-index:
- name: IceBERT-finetuned-ner
widget:
- text: systurnar guðrún og monique voru einar í skóginum umkringdar víði, eik og reyni með þá ósk að sameinast fjölskyldu sinni sem fór á mai thai og í bíó paradís að sjá jim carey leika í the eternal sunshine of the spotless mind.
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# IceBERT-finetuned-ner
This model is a fine-tuned version of [eliasbe/IceBERT-finetuned-ner](https://huggingface.co/eliasbe/IceBERT-finetuned-ner) on the mim_gold_ner dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
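The card does not include a usage example. As an illustration only, the checkpoint can typically be loaded with the standard `transformers` token-classification pipeline; the input sentence is taken from the widget text above:
```python
from transformers import pipeline

# Hypothetical usage sketch: tag Icelandic text with the fine-tuned NER model.
ner = pipeline(
    "token-classification",
    model="eliasbe/IceBERT-finetuned-ner",
    aggregation_strategy="simple",  # merge word pieces into whole entities
)
print(ner("systurnar guðrún og monique voru einar í skóginum"))
```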
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Framework versions
- Transformers 4.11.2
- Pytorch 1.9.0+cu102
- Datasets 1.12.1
- Tokenizers 0.10.3
|
eliotm/t5-small-finetuned-en-to-ro-LR_1e-3 | 1481505e2aeaf37477996b557f09a86bd0ece164 | 2021-12-02T14:05:14.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:wmt16",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | eliotm | null | eliotm/t5-small-finetuned-en-to-ro-LR_1e-3 | 1 | null | transformers | 28,930 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- wmt16
metrics:
- bleu
model-index:
- name: t5-small-finetuned-en-to-ro-LR_1e-3
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: wmt16
type: wmt16
args: ro-en
metrics:
- name: Bleu
type: bleu
value: 7.1606
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-en-to-ro-LR_1e-3
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the wmt16 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.5215
- Bleu: 7.1606
- Gen Len: 18.2451
## Model description
More information needed
## Intended uses & limitations
More information needed
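The card leaves usage unspecified. Since the checkpoint is a fine-tuned t5-small, inference presumably follows the usual T5 text2text pattern with a translation prefix; the prefix and example sentence below are assumptions, not taken from the original card:
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hypothetical usage sketch for English-to-Romanian translation.
model_id = "eliotm/t5-small-finetuned-en-to-ro-LR_1e-3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("translate English to Romanian: The weather is nice today.", return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```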
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.001
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| 0.6758 | 1.0 | 7629 | 1.5215 | 7.1606 | 18.2451 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
|
eliotm/t5-small-finetuned-en-to-ro-fp16_off | 874aa3e1e06d45355af9de05626c0c426c805596 | 2021-12-03T03:05:19.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:wmt16",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | eliotm | null | eliotm/t5-small-finetuned-en-to-ro-fp16_off | 1 | null | transformers | 28,931 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- wmt16
metrics:
- bleu
model-index:
- name: t5-small-finetuned-en-to-ro-fp16_off
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: wmt16
type: wmt16
args: ro-en
metrics:
- name: Bleu
type: bleu
value: 5.9132
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-en-to-ro-fp16_off
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the wmt16 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.8351
- Bleu: 5.9132
- Gen Len: 18.2656
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| 0.8501 | 1.0 | 7629 | 1.8351 | 5.9132 | 18.2656 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
|
eliotm/t5-small-finetuned-en-to-ro-lr_2e-6 | fc1a35a8770ae924aa57a6ec7a33e4ef9b4b62b6 | 2021-12-02T03:07:16.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:wmt16",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | eliotm | null | eliotm/t5-small-finetuned-en-to-ro-lr_2e-6 | 1 | null | transformers | 28,932 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- wmt16
metrics:
- bleu
model-index:
- name: t5-small-finetuned-en-to-ro-lr_2e-6
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: wmt16
type: wmt16
args: ro-en
metrics:
- name: Bleu
type: bleu
value: 7.2935
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-en-to-ro-lr_2e-6
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the wmt16 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4232
- Bleu: 7.2935
- Gen Len: 18.2521
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-06
- train_batch_size: 10
- eval_batch_size: 10
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 0.04375
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|
| 0.6703 | 0.04 | 2671 | 1.4232 | 7.2935 | 18.2521 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
|
emeson77/wav2vec2-large-xls-r-300m-turkish-colab | f417487d6bc6356e5ab3e55cc540126ea4d43b4c | 2021-12-23T06:25:06.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | emeson77 | null | emeson77/wav2vec2-large-xls-r-300m-turkish-colab | 1 | null | transformers | 28,933 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xls-r-300m-turkish-colab
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-turkish-colab
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7214
- Wer: 0.5555
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training (a sketch of the corresponding `TrainingArguments` follows the list):
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP
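Not part of the original card: a rough sketch of how the values above might map onto `transformers.TrainingArguments`. The actual training script is not provided, so the argument names are assumptions based on the standard Trainer API:
```python
from transformers import TrainingArguments

# Hypothetical reconstruction of the hyperparameters listed above.
training_args = TrainingArguments(
    output_dir="wav2vec2-large-xls-r-300m-turkish-colab",
    learning_rate=3e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,  # yields the total train batch size of 32
    warmup_steps=500,
    num_train_epochs=30,
    seed=42,
    fp16=True,  # "Native AMP" mixed-precision training
)
```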
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 3.4408 | 7.83 | 400 | 0.8109 | 0.7792 |
| 0.2469 | 15.68 | 800 | 0.6794 | 0.5975 |
| 0.0871 | 23.52 | 1200 | 0.7214 | 0.5555 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.13.3
- Tokenizers 0.10.3
|
emre/distilbert-base-uncased-finetuned-squad | 627d4a143e6e46d77f5d558e28adb1ac775cd3d0 | 2022-02-12T23:05:04.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | emre | null | emre/distilbert-base-uncased-finetuned-squad | 1 | null | transformers | 28,934 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1620
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.2256 | 1.0 | 5533 | 1.1620 |
| 0.9551 | 2.0 | 11066 | 1.1237 |
| 0.7726 | 3.0 | 16599 | 1.1620 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
|
emre/wav2vec2-xls-r-300m-Turkish-Tr-med | a2e02acb6e44040de93d55f9d8a1f56416ddce18 | 2022-02-10T22:56:56.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | emre | null | emre/wav2vec2-xls-r-300m-Turkish-Tr-med | 1 | null | transformers | 28,935 | ---
license: apache-2.0
tags:
- generated_from_trainer
- robust-speech-event
datasets:
- common_voice
model-index:
- name: wav2vec2-xls-r-300m-Turkish-Tr-med
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-300m-Turkish-Tr-med
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4727
- Wer: 0.4677
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 60
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 4.8093 | 4.21 | 400 | 2.7831 | 1.0 |
| 0.9881 | 8.42 | 800 | 0.5088 | 0.6681 |
| 0.3519 | 12.63 | 1200 | 0.4496 | 0.6007 |
| 0.2436 | 16.84 | 1600 | 0.4993 | 0.5654 |
| 0.1874 | 21.05 | 2000 | 0.4793 | 0.5530 |
| 0.1561 | 25.26 | 2400 | 0.5187 | 0.5589 |
| 0.1336 | 29.47 | 2800 | 0.5135 | 0.5311 |
| 0.1163 | 33.68 | 3200 | 0.4960 | 0.5143 |
| 0.1056 | 37.89 | 3600 | 0.4795 | 0.5045 |
| 0.0959 | 42.11 | 4000 | 0.4883 | 0.4987 |
| 0.0819 | 46.32 | 4400 | 0.4799 | 0.4903 |
| 0.0756 | 50.53 | 4800 | 0.4822 | 0.4831 |
| 0.0692 | 54.74 | 5200 | 0.4621 | 0.4762 |
| 0.062 | 58.95 | 5600 | 0.4727 | 0.4677 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.14.0
- Tokenizers 0.10.3
|
emre/wav2vec2-xls-r-300m-Turkish-Tr-small | 027ac27180e9b50f9b3a733aa79830199aee7b8d | 2022-02-10T22:55:52.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | emre | null | emre/wav2vec2-xls-r-300m-Turkish-Tr-small | 1 | null | transformers | 28,936 | ---
license: apache-2.0
tags:
- generated_from_trainer
- robust-speech-event
datasets:
- common_voice
model-index:
- name: wav2vec2-xls-r-300m-Turkish-Tr-small
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-300m-Turkish-Tr-small
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4375
- Wer: 0.5050
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 4.8735 | 4.21 | 400 | 2.8173 | 1.0002 |
| 1.0073 | 8.42 | 800 | 0.4981 | 0.6717 |
| 0.3395 | 12.63 | 1200 | 0.4470 | 0.5866 |
| 0.2254 | 16.84 | 1600 | 0.4349 | 0.5491 |
| 0.1648 | 21.05 | 2000 | 0.4454 | 0.5284 |
| 0.1325 | 25.26 | 2400 | 0.4552 | 0.5131 |
| 0.1102 | 29.47 | 2800 | 0.4375 | 0.5050 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.14.0
- Tokenizers 0.10.3
|
emre/wav2vec2-xls-r-300m-hy-AM-CV8-v1 | a2bc62ea7ea0f0b7e89850418fad2d50fb67c368 | 2022-02-11T15:29:46.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"robust-speech-event",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | emre | null | emre/wav2vec2-xls-r-300m-hy-AM-CV8-v1 | 1 | null | transformers | 28,937 | ---
license: apache-2.0
tags:
- generated_from_trainer
- robust-speech-event
datasets:
- common_voice
model-index:
- name: wav2vec2-xls-r-300m-hy-AM-CV8-v1
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-xls-r-300m-hy-AM-CV8-v1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9145
- Wer: 0.9598
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 300
- num_epochs: 170
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:------:|
| 5.7132 | 83.31 | 500 | 1.9274 | 1.0523 |
| 1.017 | 166.62 | 1000 | 0.9145 | 0.9598 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.10.3
|
enriqueyanh/bert1 | 25b51925902f0c0a3b7b528870cd33e4297a3cd3 | 2021-07-18T02:41:14.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | enriqueyanh | null | enriqueyanh/bert1 | 1 | null | transformers | 28,938 | Entry not found |
enriqueyanh/bert_cn | de8ee47104a69beefe0186621305af0924e88ba3 | 2021-07-19T11:50:22.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | enriqueyanh | null | enriqueyanh/bert_cn | 1 | null | transformers | 28,939 | Entry not found |
eooitom/phobertlong4096 | 340948a3573d7b479d2bbc5acf725633f1d9a5c7 | 2021-07-18T17:26:31.000Z | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | eooitom | null | eooitom/phobertlong4096 | 1 | null | transformers | 28,940 | Entry not found |
ericRosello/distilbert-base-uncased-finetuned-squad-frozen-v1 | 2502f0252952b8f5518474ba38c805f5b9ba95d6 | 2022-01-04T12:14:41.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | ericRosello | null | ericRosello/distilbert-base-uncased-finetuned-squad-frozen-v1 | 1 | null | transformers | 28,941 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 4.3629
## Model description
Base model weights were frozen, leaving only the last layer (qa outputs) to be fine-tuned.
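A minimal sketch of the freezing scheme described above (not the author's actual training code; it assumes the standard `transformers` DistilBERT question-answering head layout):
```python
from transformers import AutoModelForQuestionAnswering

# Hypothetical sketch: freeze the DistilBERT encoder so only the qa_outputs head is trained.
model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased")
for param in model.distilbert.parameters():
    param.requires_grad = False  # base model weights stay fixed
# model.qa_outputs (the span-prediction head) remains trainable during fine-tuning.
```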
## Training and evaluation data
Achieved EM: 4.7776726584673606, F1: 11.440882287905591
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 4.679 | 1.0 | 5533 | 4.6713 |
| 4.4171 | 2.0 | 11066 | 4.4218 |
| 4.3464 | 3.0 | 16599 | 4.3629 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
|
ericRosello/trial | 2840e8a5bb36a6061df33d69850590da1fa6681f | 2021-12-30T16:35:05.000Z | [
"pytorch",
"distilbert",
"feature-extraction",
"transformers"
] | feature-extraction | false | ericRosello | null | ericRosello/trial | 1 | null | transformers | 28,942 | Entry not found |
erica/kc_900 | 688e90d168187e9ba506af1823cba4ef67924243 | 2021-05-22T03:41:14.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | erica | null | erica/kc_900 | 1 | null | transformers | 28,943 | Entry not found |
erica/kob900 | 076f595aed843bce2e0b1bd6154c9ef52dac8e6d | 2021-05-20T12:53:34.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | erica | null | erica/kob900 | 1 | null | transformers | 28,944 | Entry not found |
erica/krm_fin | 62081b3738def16632f3529b8e1bde141446c3f3 | 2021-11-18T02:24:55.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | erica | null | erica/krm_fin | 1 | null | transformers | 28,945 | Entry not found |
ericchchiu/dummy-model | 53c51e971edf428756f56f9c44c213628e6323e6 | 2021-09-19T05:11:06.000Z | [
"pytorch",
"camembert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | ericchchiu | null | ericchchiu/dummy-model | 1 | null | transformers | 28,946 | Entry not found |
eunjin/koMHBERT-kobert-based-v1 | 4a664b73a3c4a95dea65f2eebdcbc09474bab905 | 2021-05-19T16:47:55.000Z | [
"pytorch",
"jax",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | eunjin | null | eunjin/koMHBERT-kobert-based-v1 | 1 | null | transformers | 28,947 | Entry not found |
f00d4tehg0dz/Peppa | 196a2344d6e0281257c390a815faa5356f53b1cd | 2021-08-28T03:52:56.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | f00d4tehg0dz | null | f00d4tehg0dz/Peppa | 1 | null | transformers | 28,948 | ---
tags:
- conversational
---
# Peppa Pig chat bot |
fabianafatsawo/math_problem_NLtoACE_BART | 3beabc3ad8dc572a315aaec8f9856dfc4926fd34 | 2022-02-03T20:55:00.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | fabianafatsawo | null | fabianafatsawo/math_problem_NLtoACE_BART | 1 | null | transformers | 28,949 | Entry not found |
facebook/wav2vec2-base-10k-voxpopuli-ft-cs | 6b0285fee92dcd520b0c49246eeaaf80689501aa | 2021-07-06T01:48:35.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"cs",
"arxiv:2101.00390",
"transformers",
"audio",
"voxpopuli",
"license:cc-by-nc-4.0"
] | automatic-speech-recognition | false | facebook | null | facebook/wav2vec2-base-10k-voxpopuli-ft-cs | 1 | null | transformers | 28,950 | ---
language: cs
tags:
- audio
- automatic-speech-recognition
- voxpopuli
license: cc-by-nc-4.0
---
# Wav2Vec2-Base-VoxPopuli-Finetuned
[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) base model pretrained on the 10K unlabeled subset of [VoxPopuli corpus](https://arxiv.org/abs/2101.00390) and fine-tuned on the transcribed data in cs (refer to Table 1 of paper for more information).
**Paper**: *[VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation
Learning, Semi-Supervised Learning and Interpretation](https://arxiv.org/abs/2101.00390)*
**Authors**: *Changhan Wang, Morgane Riviere, Ann Lee, Anne Wu, Chaitanya Talnikar, Daniel Haziza, Mary Williamson, Juan Pino, Emmanuel Dupoux* from *Facebook AI*
See the official website for more information, [here](https://github.com/facebookresearch/voxpopuli/)
# Usage for inference
The following shows how the model can be used for inference on a sample of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets).
```python
#!/usr/bin/env python3
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from datasets import load_dataset
import torchaudio
import torch
# resample audio
# load model & processor
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-cs")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-cs")
# load dataset
ds = load_dataset("common_voice", "cs", split="validation[:1%]")
# common voice does not match target sampling rate
common_voice_sample_rate = 48000
target_sample_rate = 16000
resampler = torchaudio.transforms.Resample(common_voice_sample_rate, target_sample_rate)
# define mapping fn to read in sound file and resample
def map_to_array(batch):
speech, _ = torchaudio.load(batch["path"])
speech = resampler(speech)
batch["speech"] = speech[0]
return batch
# load all audio files
ds = ds.map(map_to_array)
# run inference on the first 5 data samples
inputs = processor(ds[:5]["speech"], sampling_rate=target_sample_rate, return_tensors="pt", padding=True)
# inference
logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, axis=-1)
print(processor.batch_decode(predicted_ids))
```
|
facebook/wav2vec2-base-10k-voxpopuli-ft-fi | 8a507c332fc29a126f4c4e0f8e2be6b044806b64 | 2021-07-06T01:49:51.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"fi",
"arxiv:2101.00390",
"transformers",
"audio",
"voxpopuli",
"license:cc-by-nc-4.0"
] | automatic-speech-recognition | false | facebook | null | facebook/wav2vec2-base-10k-voxpopuli-ft-fi | 1 | null | transformers | 28,951 | ---
language: fi
tags:
- audio
- automatic-speech-recognition
- voxpopuli
license: cc-by-nc-4.0
---
# Wav2Vec2-Base-VoxPopuli-Finetuned
[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) base model pretrained on the 10K unlabeled subset of [VoxPopuli corpus](https://arxiv.org/abs/2101.00390) and fine-tuned on the transcribed data in fi (refer to Table 1 of paper for more information).
**Paper**: *[VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation
Learning, Semi-Supervised Learning and Interpretation](https://arxiv.org/abs/2101.00390)*
**Authors**: *Changhan Wang, Morgane Riviere, Ann Lee, Anne Wu, Chaitanya Talnikar, Daniel Haziza, Mary Williamson, Juan Pino, Emmanuel Dupoux* from *Facebook AI*
See the official website for more information, [here](https://github.com/facebookresearch/voxpopuli/)
# Usage for inference
The following shows how the model can be used for inference on a sample of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets).
```python
#!/usr/bin/env python3
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from datasets import load_dataset
import torchaudio
import torch
# resample audio
# load model & processor
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-fi")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-fi")
# load dataset
ds = load_dataset("common_voice", "fi", split="validation[:1%]")
# common voice does not match target sampling rate
common_voice_sample_rate = 48000
target_sample_rate = 16000
resampler = torchaudio.transforms.Resample(common_voice_sample_rate, target_sample_rate)
# define mapping fn to read in sound file and resample
def map_to_array(batch):
speech, _ = torchaudio.load(batch["path"])
speech = resampler(speech)
batch["speech"] = speech[0]
return batch
# load all audio files
ds = ds.map(map_to_array)
# run inference on the first 5 data samples
inputs = processor(ds[:5]["speech"], sampling_rate=target_sample_rate, return_tensors="pt", padding=True)
# inference
logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, axis=-1)
print(processor.batch_decode(predicted_ids))
```
|
facebook/wav2vec2-base-10k-voxpopuli-ft-hr | 06e605b9b3bf5a6c6b25b691c82a2f8f65562050 | 2021-07-06T01:50:33.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"hr",
"arxiv:2101.00390",
"transformers",
"audio",
"voxpopuli",
"license:cc-by-nc-4.0"
] | automatic-speech-recognition | false | facebook | null | facebook/wav2vec2-base-10k-voxpopuli-ft-hr | 1 | null | transformers | 28,952 | ---
language: hr
tags:
- audio
- automatic-speech-recognition
- voxpopuli
license: cc-by-nc-4.0
---
# Wav2Vec2-Base-VoxPopuli-Finetuned
[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) base model pretrained on the 10K unlabeled subset of [VoxPopuli corpus](https://arxiv.org/abs/2101.00390) and fine-tuned on the transcribed data in hr (refer to Table 1 of paper for more information).
**Paper**: *[VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation
Learning, Semi-Supervised Learning and Interpretation](https://arxiv.org/abs/2101.00390)*
**Authors**: *Changhan Wang, Morgane Riviere, Ann Lee, Anne Wu, Chaitanya Talnikar, Daniel Haziza, Mary Williamson, Juan Pino, Emmanuel Dupoux* from *Facebook AI*
See the official website for more information, [here](https://github.com/facebookresearch/voxpopuli/)
# Usage for inference
The following shows how the model can be used for inference on a sample of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets).
```python
#!/usr/bin/env python3
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from datasets import load_dataset
import torchaudio
import torch
# load model & processor
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-hr")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-hr")
# load dataset
ds = load_dataset("common_voice", "hr", split="validation[:1%]")
# common voice does not match target sampling rate
common_voice_sample_rate = 48000
target_sample_rate = 16000
resampler = torchaudio.transforms.Resample(common_voice_sample_rate, target_sample_rate)
# define mapping fn to read in sound file and resample
def map_to_array(batch):
speech, _ = torchaudio.load(batch["path"])
speech = resampler(speech)
batch["speech"] = speech[0]
return batch
# load all audio files
ds = ds.map(map_to_array)
# run inference on the first 5 data samples
inputs = processor(ds[:5]["speech"], sampling_rate=target_sample_rate, return_tensors="pt", padding=True)
# inference
logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, axis=-1)
print(processor.batch_decode(predicted_ids))
```
|
facebook/wav2vec2-base-10k-voxpopuli-ft-sk | 2b0347bd9eaa179b29222b649f517f0391754d6b | 2021-07-06T01:52:44.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"sk",
"arxiv:2101.00390",
"transformers",
"audio",
"voxpopuli",
"license:cc-by-nc-4.0"
] | automatic-speech-recognition | false | facebook | null | facebook/wav2vec2-base-10k-voxpopuli-ft-sk | 1 | null | transformers | 28,953 | ---
language: sk
tags:
- audio
- automatic-speech-recognition
- voxpopuli
license: cc-by-nc-4.0
---
# Wav2Vec2-Base-VoxPopuli-Finetuned
[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) base model pretrained on the 10K unlabeled subset of [VoxPopuli corpus](https://arxiv.org/abs/2101.00390) and fine-tuned on the transcribed data in sk (refer to Table 1 of paper for more information).
**Paper**: *[VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation
Learning, Semi-Supervised Learning and Interpretation](https://arxiv.org/abs/2101.00390)*
**Authors**: *Changhan Wang, Morgane Riviere, Ann Lee, Anne Wu, Chaitanya Talnikar, Daniel Haziza, Mary Williamson, Juan Pino, Emmanuel Dupoux* from *Facebook AI*
See the official website for more information, [here](https://github.com/facebookresearch/voxpopuli/)
# Usage for inference
The following shows how the model can be used for inference on a sample of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets).
```python
#!/usr/bin/env python3
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from datasets import load_dataset
import torchaudio
import torch
# load model & processor
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-sk")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-10k-voxpopuli-ft-sk")
# load dataset
ds = load_dataset("common_voice", "sk", split="validation[:1%]")
# common voice does not match target sampling rate
common_voice_sample_rate = 48000
target_sample_rate = 16000
resampler = torchaudio.transforms.Resample(common_voice_sample_rate, target_sample_rate)
# define mapping fn to read in sound file and resample
def map_to_array(batch):
speech, _ = torchaudio.load(batch["path"])
speech = resampler(speech)
batch["speech"] = speech[0]
return batch
# load all audio files
ds = ds.map(map_to_array)
# run inference on the first 5 data samples
inputs = processor(ds[:5]["speech"], sampling_rate=target_sample_rate, return_tensors="pt", padding=True)
# inference
logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, axis=-1)
print(processor.batch_decode(predicted_ids))
```
|
facebook/wav2vec2-large-10k-voxpopuli | a77cf9dac84c3b9e334540ce020518f1b291eaa0 | 2021-07-06T01:57:22.000Z | [
"pytorch",
"jax",
"wav2vec2",
"pretraining",
"multilingual",
"arxiv:2101.00390",
"transformers",
"audio",
"automatic-speech-recognition",
"voxpopuli",
"license:cc-by-nc-4.0"
] | automatic-speech-recognition | false | facebook | null | facebook/wav2vec2-large-10k-voxpopuli | 1 | null | transformers | 28,954 | ---
language: multilingual
tags:
- audio
- automatic-speech-recognition
- voxpopuli
license: cc-by-nc-4.0
---
# Wav2Vec2-Large-VoxPopuli
[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) large model pretrained on the 10k unlabeled subset of [VoxPopuli corpus](https://arxiv.org/abs/2101.00390).
**Paper**: *[VoxPopuli: A Large-Scale Multilingual Speech Corpus for Representation
Learning, Semi-Supervised Learning and Interpretation](https://arxiv.org/abs/2101.00390)*
**Authors**: *Changhan Wang, Morgane Riviere, Ann Lee, Anne Wu, Chaitanya Talnikar, Daniel Haziza, Mary Williamson, Juan Pino, Emmanuel Dupoux* from *Facebook AI*
See the official website for more information, [here](https://github.com/facebookresearch/voxpopuli/)
# Fine-Tuning
Please refer to [this blog](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) on how to fine-tune this model on a specific language. Note that you should replace `"facebook/wav2vec2-large-xlsr-53"` with this checkpoint for fine-tuning.
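As a rough sketch of that checkpoint swap (the dropout and CTC settings below follow the linked blog and are assumptions, and `vocab_size` is a placeholder that must match the tokenizer built for your target language):
```python
from transformers import Wav2Vec2ForCTC

# load this VoxPopuli checkpoint in place of "facebook/wav2vec2-large-xlsr-53"
model = Wav2Vec2ForCTC.from_pretrained(
    "facebook/wav2vec2-large-10k-voxpopuli",
    attention_dropout=0.1,
    hidden_dropout=0.1,
    feat_proj_dropout=0.0,
    mask_time_prob=0.05,
    layerdrop=0.1,
    ctc_loss_reduction="mean",
    vocab_size=36,  # placeholder: use len(processor.tokenizer) for your language
)
model.freeze_feature_extractor()  # the CNN feature encoder is usually kept frozen during fine-tuning
```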
|
fadhilarkan/gq-indo-k | 9356d907efc529253bc79fe8985566e1220cc936 | 2021-08-22T22:25:31.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | fadhilarkan | null | fadhilarkan/gq-indo-k | 1 | null | transformers | 28,955 | ---
metrics:
- rouge
model-index:
- name: gq-indo-k
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# gq-indo-k
This model was trained from scratch on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.7905
- Rouge1: 22.5734
- Rouge2: 6.555
- Rougel: 20.9491
- Rougelsum: 20.9509
- Gen Len: 12.0767
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 10
- eval_batch_size: 10
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
| 2.9355 | 1.0 | 13032 | 2.8563 | 22.4828 | 6.5456 | 20.8782 | 20.8772 | 11.915 |
| 2.825 | 2.0 | 26064 | 2.7993 | 22.547 | 6.5815 | 20.8937 | 20.8973 | 12.0886 |
| 2.7631 | 3.0 | 39096 | 2.7905 | 22.5734 | 6.555 | 20.9491 | 20.9509 | 12.0767 |
### Framework versions
- Transformers 4.6.1
- Pytorch 1.7.0
- Datasets 1.11.0
- Tokenizers 0.10.3
|
fadhilarkan/qa-indo-math-k-v2 | e1c3e77222ce625da9a708c3c29f234241e74d19 | 2021-08-23T08:45:10.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | fadhilarkan | null | fadhilarkan/qa-indo-math-k-v2 | 1 | null | transformers | 28,956 | ---
model-index:
- name: qa-indo-math-k-v2
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# qa-indo-math-k-v2
This model was trained from scratch on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9328
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 100
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 80 | 0.7969 |
| No log | 2.0 | 160 | 0.7612 |
| No log | 3.0 | 240 | 0.7624 |
| No log | 4.0 | 320 | 0.7424 |
| No log | 5.0 | 400 | 0.7634 |
| No log | 6.0 | 480 | 0.7415 |
| 0.9241 | 7.0 | 560 | 0.7219 |
| 0.9241 | 8.0 | 640 | 0.7792 |
| 0.9241 | 9.0 | 720 | 0.7803 |
| 0.9241 | 10.0 | 800 | 0.7666 |
| 0.9241 | 11.0 | 880 | 0.7614 |
| 0.9241 | 12.0 | 960 | 0.7616 |
| 0.6373 | 13.0 | 1040 | 0.7673 |
| 0.6373 | 14.0 | 1120 | 0.7818 |
| 0.6373 | 15.0 | 1200 | 0.8030 |
| 0.6373 | 16.0 | 1280 | 0.8021 |
| 0.6373 | 17.0 | 1360 | 0.8025 |
| 0.6373 | 18.0 | 1440 | 0.8628 |
| 0.5614 | 19.0 | 1520 | 0.8616 |
| 0.5614 | 20.0 | 1600 | 0.8739 |
| 0.5614 | 21.0 | 1680 | 0.8647 |
| 0.5614 | 22.0 | 1760 | 0.9006 |
| 0.5614 | 23.0 | 1840 | 0.9560 |
| 0.5614 | 24.0 | 1920 | 0.9395 |
| 0.486 | 25.0 | 2000 | 0.9453 |
| 0.486 | 26.0 | 2080 | 0.9569 |
| 0.486 | 27.0 | 2160 | 1.0208 |
| 0.486 | 28.0 | 2240 | 0.9860 |
| 0.486 | 29.0 | 2320 | 0.9806 |
| 0.486 | 30.0 | 2400 | 1.0681 |
| 0.486 | 31.0 | 2480 | 1.1085 |
| 0.4126 | 32.0 | 2560 | 1.1028 |
| 0.4126 | 33.0 | 2640 | 1.1110 |
| 0.4126 | 34.0 | 2720 | 1.1573 |
| 0.4126 | 35.0 | 2800 | 1.1387 |
| 0.4126 | 36.0 | 2880 | 1.2067 |
| 0.4126 | 37.0 | 2960 | 1.2079 |
| 0.3559 | 38.0 | 3040 | 1.2152 |
| 0.3559 | 39.0 | 3120 | 1.2418 |
| 0.3559 | 40.0 | 3200 | 1.2023 |
| 0.3559 | 41.0 | 3280 | 1.2679 |
| 0.3559 | 42.0 | 3360 | 1.3178 |
| 0.3559 | 43.0 | 3440 | 1.3419 |
| 0.3084 | 44.0 | 3520 | 1.4702 |
| 0.3084 | 45.0 | 3600 | 1.3824 |
| 0.3084 | 46.0 | 3680 | 1.4227 |
| 0.3084 | 47.0 | 3760 | 1.3925 |
| 0.3084 | 48.0 | 3840 | 1.4940 |
| 0.3084 | 49.0 | 3920 | 1.4110 |
| 0.2686 | 50.0 | 4000 | 1.4534 |
| 0.2686 | 51.0 | 4080 | 1.4749 |
| 0.2686 | 52.0 | 4160 | 1.5351 |
| 0.2686 | 53.0 | 4240 | 1.5479 |
| 0.2686 | 54.0 | 4320 | 1.4755 |
| 0.2686 | 55.0 | 4400 | 1.5207 |
| 0.2686 | 56.0 | 4480 | 1.5075 |
| 0.2388 | 57.0 | 4560 | 1.5470 |
| 0.2388 | 58.0 | 4640 | 1.5361 |
| 0.2388 | 59.0 | 4720 | 1.5914 |
| 0.2388 | 60.0 | 4800 | 1.6430 |
| 0.2388 | 61.0 | 4880 | 1.6249 |
| 0.2388 | 62.0 | 4960 | 1.5503 |
| 0.2046 | 63.0 | 5040 | 1.6441 |
| 0.2046 | 64.0 | 5120 | 1.6789 |
| 0.2046 | 65.0 | 5200 | 1.6174 |
| 0.2046 | 66.0 | 5280 | 1.6175 |
| 0.2046 | 67.0 | 5360 | 1.6947 |
| 0.2046 | 68.0 | 5440 | 1.6299 |
| 0.1891 | 69.0 | 5520 | 1.7419 |
| 0.1891 | 70.0 | 5600 | 1.8442 |
| 0.1891 | 71.0 | 5680 | 1.8802 |
| 0.1891 | 72.0 | 5760 | 1.8233 |
| 0.1891 | 73.0 | 5840 | 1.8172 |
| 0.1891 | 74.0 | 5920 | 1.8181 |
| 0.1664 | 75.0 | 6000 | 1.8399 |
| 0.1664 | 76.0 | 6080 | 1.8128 |
| 0.1664 | 77.0 | 6160 | 1.8423 |
| 0.1664 | 78.0 | 6240 | 1.8380 |
| 0.1664 | 79.0 | 6320 | 1.8941 |
| 0.1664 | 80.0 | 6400 | 1.8636 |
| 0.1664 | 81.0 | 6480 | 1.7949 |
| 0.1614 | 82.0 | 6560 | 1.8342 |
| 0.1614 | 83.0 | 6640 | 1.8123 |
| 0.1614 | 84.0 | 6720 | 1.8639 |
| 0.1614 | 85.0 | 6800 | 1.8580 |
| 0.1614 | 86.0 | 6880 | 1.8816 |
| 0.1614 | 87.0 | 6960 | 1.8579 |
| 0.1487 | 88.0 | 7040 | 1.8783 |
| 0.1487 | 89.0 | 7120 | 1.9175 |
| 0.1487 | 90.0 | 7200 | 1.9025 |
| 0.1487 | 91.0 | 7280 | 1.9207 |
| 0.1487 | 92.0 | 7360 | 1.9195 |
| 0.1487 | 93.0 | 7440 | 1.9142 |
| 0.1355 | 94.0 | 7520 | 1.9333 |
| 0.1355 | 95.0 | 7600 | 1.9238 |
| 0.1355 | 96.0 | 7680 | 1.9256 |
| 0.1355 | 97.0 | 7760 | 1.9305 |
| 0.1355 | 98.0 | 7840 | 1.9294 |
| 0.1355 | 99.0 | 7920 | 1.9301 |
| 0.1297 | 100.0 | 8000 | 1.9328 |
### Framework versions
- Transformers 4.6.1
- Pytorch 1.7.0
- Datasets 1.11.0
- Tokenizers 0.10.3
|
fadhilarkan/qa-indo-math-k | bd178df6dbff83acd429d25feaab02394a4ee59c | 2021-08-23T07:40:55.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | fadhilarkan | null | fadhilarkan/qa-indo-math-k | 1 | null | transformers | 28,957 | ---
model-index:
- name: qa-indo-math-k
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# qa-indo-math-k
This model was trained from scratch on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8801
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 10
- eval_batch_size: 10
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 127 | 0.7652 |
| No log | 2.0 | 254 | 0.7520 |
| No log | 3.0 | 381 | 0.7681 |
| 0.9618 | 4.0 | 508 | 0.7337 |
| 0.9618 | 5.0 | 635 | 0.7560 |
| 0.9618 | 6.0 | 762 | 0.7397 |
| 0.9618 | 7.0 | 889 | 0.7298 |
| 0.6652 | 8.0 | 1016 | 0.7891 |
| 0.6652 | 9.0 | 1143 | 0.7874 |
| 0.6652 | 10.0 | 1270 | 0.7759 |
| 0.6652 | 11.0 | 1397 | 0.7505 |
| 0.6174 | 12.0 | 1524 | 0.7838 |
| 0.6174 | 13.0 | 1651 | 0.7878 |
| 0.6174 | 14.0 | 1778 | 0.8028 |
| 0.6174 | 15.0 | 1905 | 0.8154 |
| 0.5733 | 16.0 | 2032 | 0.8131 |
| 0.5733 | 17.0 | 2159 | 0.8278 |
| 0.5733 | 18.0 | 2286 | 0.8308 |
| 0.5733 | 19.0 | 2413 | 0.8433 |
| 0.5378 | 20.0 | 2540 | 0.8303 |
| 0.5378 | 21.0 | 2667 | 0.8352 |
| 0.5378 | 22.0 | 2794 | 0.8369 |
| 0.5378 | 23.0 | 2921 | 0.8518 |
| 0.5095 | 24.0 | 3048 | 0.8749 |
| 0.5095 | 25.0 | 3175 | 0.8533 |
| 0.5095 | 26.0 | 3302 | 0.8547 |
| 0.5095 | 27.0 | 3429 | 0.8844 |
| 0.4856 | 28.0 | 3556 | 0.8752 |
| 0.4856 | 29.0 | 3683 | 0.8804 |
| 0.4856 | 30.0 | 3810 | 0.8801 |
### Framework versions
- Transformers 4.6.1
- Pytorch 1.7.0
- Datasets 1.11.0
- Tokenizers 0.10.3
|
fadhilarkan/tmpr60526f6 | d7355c1978336813de661e21fbd9439b7f946650 | 2021-08-23T22:32:25.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | fadhilarkan | null | fadhilarkan/tmpr60526f6 | 1 | null | transformers | 28,958 | Entry not found |
fbaigt/proc_roberta | 02244bc84ce86d26da9c70669d4cc9f1f0bfe57f | 2021-11-08T15:02:04.000Z | [
"pytorch",
"roberta",
"feature-extraction",
"en",
"dataset:pubmed",
"dataset:chemical patent",
"dataset:cooking recipe",
"arxiv:2109.04711",
"transformers"
] | feature-extraction | false | fbaigt | null | fbaigt/proc_roberta | 1 | null | transformers | 28,959 | ---
language:
- en
datasets:
- pubmed
- chemical patent
- cooking recipe
---
## Proc-RoBERTa
Proc-RoBERTa is a pre-trained language model for procedural text. It was built by fine-tuning the RoBERTa-base model on a procedural corpus (PubMed articles/chemical patents/cooking recipes), which contains 1.05B tokens. More details can be found in the following [paper](https://arxiv.org/abs/2109.04711):
```
@inproceedings{bai-etal-2021-pre,
title = "Pre-train or Annotate? Domain Adaptation with a Constrained Budget",
author = "Bai, Fan and
Ritter, Alan and
Xu, Wei",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
}
```
## Usage
```python
from transformers import AutoTokenizer, AutoModelForTokenClassification
tokenizer = AutoTokenizer.from_pretrained("fbaigt/proc_roberta")
model = AutoModelForTokenClassification.from_pretrained("fbaigt/proc_roberta")
```
More usage details can be found [here](https://github.com/bflashcp3f/ProcBERT). |
felipetanios/opus-mt-de-en-finetuned-de-to-en-second | 134761ba4aa1929adbc022e17fbe96f53a18fa27 | 2021-12-04T18:48:17.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"dataset:wmt16",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | felipetanios | null | felipetanios/opus-mt-de-en-finetuned-de-to-en-second | 1 | null | transformers | 28,960 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- wmt16
metrics:
- bleu
model-index:
- name: opus-mt-de-en-finetuned-de-to-en-second
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: wmt16
type: wmt16
args: de-en
metrics:
- name: Bleu
type: bleu
value: 37.9762
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# opus-mt-de-en-finetuned-de-to-en-second
This model is a fine-tuned version of [Helsinki-NLP/opus-mt-de-en](https://huggingface.co/Helsinki-NLP/opus-mt-de-en) on the wmt16 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2282
- Bleu: 37.9762
- Gen Len: 25.3696
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| No log | 1.0 | 157 | 1.1837 | 38.8278 | 25.22 |
| No log | 2.0 | 314 | 1.2057 | 38.3047 | 25.2908 |
| No log | 3.0 | 471 | 1.2167 | 38.231 | 25.316 |
| 1.4808 | 4.0 | 628 | 1.2256 | 37.9871 | 25.3556 |
| 1.4808 | 5.0 | 785 | 1.2282 | 37.9762 | 25.3696 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
|
ffsouza/t5-small-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro | eb9087d10c3ca9852d1c645c7c8ab6373c3b0d5f | 2021-12-02T20:04:48.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | ffsouza | null | ffsouza/t5-small-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro | 1 | null | transformers | 28,961 | Entry not found |
ffsouza/t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.005-finetuned-en-to-ro | 1e4bf8eadeda2e0abfc5a928f5c7a89a510055fd | 2021-12-03T17:33:37.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | ffsouza | null | ffsouza/t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.005-finetuned-en-to-ro | 1 | null | transformers | 28,962 | Entry not found |
ffsouza/t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.02-finetuned-en-to-ro | 0d99eb211400c9e0854143f6a92a41e1ccbbf65a | 2021-12-03T16:07:55.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"dataset:wmt16_en_ro_pre_processed",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | ffsouza | null | ffsouza/t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.02-finetuned-en-to-ro | 1 | null | transformers | 28,963 | ---
tags:
- generated_from_trainer
datasets:
- wmt16_en_ro_pre_processed
metrics:
- bleu
model-index:
- name: t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.02-finetuned-en-to-ro
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: wmt16_en_ro_pre_processed
type: wmt16_en_ro_pre_processed
args: enro
metrics:
- name: Bleu
type: bleu
value: 0.0002
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.02-finetuned-en-to-ro
This model is a fine-tuned version of [patrickvonplaten/t5-tiny-random](https://huggingface.co/patrickvonplaten/t5-tiny-random) on the wmt16_en_ro_pre_processed dataset.
It achieves the following results on the evaluation set:
- Loss: 6.4854
- Bleu: 0.0002
- Gen Len: 9.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:------:|:-------:|
| 6.2568 | 1.0 | 76290 | 6.4854 | 0.0002 | 9.0 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu102
- Datasets 1.15.1
- Tokenizers 0.10.3
|
fgaim/tielectra-small-pos | 32a41e14ea96853608a55df991f2aa16c9440b35 | 2022-05-14T06:48:42.000Z | [
"pytorch",
"electra",
"token-classification",
"ti",
"dataset:TLMD",
"dataset:NTC",
"transformers",
"model-index",
"autotrain_compatible"
] | token-classification | false | fgaim | null | fgaim/tielectra-small-pos | 1 | 1 | transformers | 28,964 | ---
language: ti
widget:
- text: "ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር"
datasets:
- TLMD
- NTC
metrics:
- f1
- precision
- recall
- accuracy
model-index:
- name: tielectra-small-pos
results:
- task:
name: Token Classification
type: token-classification
metrics:
- name: F1
type: f1
value: 0.9456
- name: Precision
type: precision
value: 0.9456
- name: Recall
type: recall
value: 0.9456
- name: Accuracy
type: accuracy
value: 0.9456
---
# Tigrinya POS tagging with TiELECTRA
This model is a fine-tuned version of [TiELECTRA](https://huggingface.co/fgaim/tielectra-small) on the NTC-v1 dataset (Tedla et al. 2016).
## Basic usage
```python
from transformers import pipeline
ti_pos = pipeline("token-classification", model="fgaim/tielectra-small-pos")
ti_pos("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር")
```
## Training
### Hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10.0
### Results
The model achieves the following results on the test set:
- Loss: 0.2236
- Adj Precision: 0.9148
- Adj Recall: 0.9192
- Adj F1: 0.9170
- Adj Number: 1670
- Adv Precision: 0.8228
- Adv Recall: 0.8058
- Adv F1: 0.8142
- Adv Number: 484
- Con Precision: 0.9793
- Con Recall: 0.9743
- Con F1: 0.9768
- Con Number: 972
- Fw Precision: 0.5
- Fw Recall: 0.3214
- Fw F1: 0.3913
- Fw Number: 28
- Int Precision: 0.64
- Int Recall: 0.6154
- Int F1: 0.6275
- Int Number: 26
- N Precision: 0.9525
- N Recall: 0.9587
- N F1: 0.9556
- N Number: 3992
- Num Precision: 0.9825
- Num Recall: 0.9372
- Num F1: 0.9593
- Num Number: 239
- N Prp Precision: 0.9132
- N Prp Recall: 0.9404
- N Prp F1: 0.9266
- N Prp Number: 470
- N V Precision: 0.9667
- N V Recall: 0.9760
- N V F1: 0.9713
- N V Number: 416
- Pre Precision: 0.9645
- Pre Recall: 0.9592
- Pre F1: 0.9619
- Pre Number: 907
- Pro Precision: 0.9395
- Pro Recall: 0.9079
- Pro F1: 0.9234
- Pro Number: 445
- Pun Precision: 1.0
- Pun Recall: 0.9994
- Pun F1: 0.9997
- Pun Number: 1607
- Unc Precision: 0.9286
- Unc Recall: 0.8125
- Unc F1: 0.8667
- Unc Number: 16
- V Precision: 0.7609
- V Recall: 0.8974
- V F1: 0.8235
- V Number: 78
- V Aux Precision: 0.9581
- V Aux Recall: 0.9786
- V Aux F1: 0.9682
- V Aux Number: 654
- V Ger Precision: 0.9183
- V Ger Recall: 0.9415
- V Ger F1: 0.9297
- V Ger Number: 513
- V Imf Precision: 0.9473
- V Imf Recall: 0.9442
- V Imf F1: 0.9458
- V Imf Number: 914
- V Imv Precision: 0.8163
- V Imv Recall: 0.5714
- V Imv F1: 0.6723
- V Imv Number: 70
- V Prf Precision: 0.8927
- V Prf Recall: 0.8776
- V Prf F1: 0.8851
- V Prf Number: 294
- V Rel Precision: 0.9535
- V Rel Recall: 0.9485
- V Rel F1: 0.9510
- V Rel Number: 757
- Overall Precision: 0.9456
- Overall Recall: 0.9456
- Overall F1: 0.9456
- Overall Accuracy: 0.9456
### Framework versions
- Transformers 4.10.3
- Pytorch 1.9.0+cu111
- Datasets 1.10.2
- Tokenizers 0.10.1
## Citation
If you use this model in your product or research, please cite as follows:
```
@article{Fitsum2021TiPLMs,
author= {Fitsum Gaim and Wonsuk Yang and Jong C. Park},
title= {Monolingual Pre-trained Language Models for Tigrinya},
year= 2021,
publisher= {WiNLP 2021/EMNLP 2021}
}
```
## References
```
Tedla, Y., Yamamoto, K. & Marasinghe, A. 2016.
Tigrinya Part-of-Speech Tagging with Morphological Patterns and the New Nagaoka Tigrinya Corpus.
International Journal Of Computer Applications 146 pp. 33-41 (2016).
```
|
fgaim/tielectra-small | 1bb8114c801c9855a54ca1a8dee8819b880cf4ea | 2021-10-16T19:25:40.000Z | [
"pytorch",
"jax",
"electra",
"fill-mask",
"ti",
"transformers",
"autotrain_compatible"
] | fill-mask | false | fgaim | null | fgaim/tielectra-small | 1 | 1 | transformers | 28,965 | ---
language: ti
widget:
- text: "ዓቕሚ መንእሰይ ኤርትራ [MASK] ተራእዩ"
---
# Pre-trained ELECTRA small for Tigrinya Language
We pre-train ELECTRA small on the [TLMD](https://zenodo.org/record/5139094) dataset, with over 40 million tokens.
Trained Flax and PyTorch models are included.
## Hyperparameters
The hyperparameters corresponding to model sizes mentioned above are as follows:
| Model Size | L | AH | HS | FFN | P | Seq |
|------------|----|----|-----|------|------|------|
| SMALL | 12 | 4 | 256 | 1024 | 14M | 512 |
(L = number of layers; AH = number of attention heads; HS = hidden size; FFN = feedforward network dimension; P = number of parameters; Seq = maximum sequence length.)
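The card ships a fill-mask widget example but no code snippet; a minimal usage sketch with the `fill-mask` pipeline (reusing the widget sentence, which contains the `[MASK]` token) could look like this:
```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="fgaim/tielectra-small")
predictions = fill_mask("ዓቕሚ መንእሰይ ኤርትራ [MASK] ተራእዩ")
for pred in predictions:
    print(f"{pred['token_str']}\t{pred['score']:.4f}")
```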
|
figurative-nlp/se4fig-roberta-base | 15cfbc810952183067ed871f0e6537f21fc21b9e | 2022-02-17T15:54:01.000Z | [
"pytorch",
"roberta",
"transformers"
] | null | false | figurative-nlp | null | figurative-nlp/se4fig-roberta-base | 1 | null | transformers | 28,966 | This model can measure semantic similarity between pairs of texts containing figurative language. As far as we know,
this model works slightly better than sup-simCSE-roberta-base. For example:
**sentence 1**: I have been in seventh heaven since Harry entered my life .
**sentence 2**: I have been in very happy since Harry entered my life.
the cosine score of SimCSE: 0.897
the cosine score of ours: 0.897
-------------------------------------------------------------------
**sentence 1**: I have been in seventh heaven since Harry entered my life .
**sentence 2**: I have been in pain since Harry entered my life .
the cosine score of SimCSE: 0.846
the cosine score of ours: 0.753
--------------------------------------------------
Measuring the semantic similarity of figurative language from the sentence-embedding perspective is still a big challenge.
Unsupervised models may be of little use here, since the key is to infer the literal meaning of the figurative expression and annotated data is rare.
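No usage snippet is given in this card, and the pooling strategy is not documented, so the sketch below assumes mean pooling over the last hidden state to obtain sentence embeddings before computing the cosine similarity:
```python
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("figurative-nlp/se4fig-roberta-base")
model = AutoModel.from_pretrained("figurative-nlp/se4fig-roberta-base")

def embed(sentences):
    batch = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        hidden = model(**batch).last_hidden_state        # (batch, seq_len, dim)
    mask = batch["attention_mask"].unsqueeze(-1)         # (batch, seq_len, 1)
    return (hidden * mask).sum(dim=1) / mask.sum(dim=1)  # mean over non-padding tokens

emb = embed([
    "I have been in seventh heaven since Harry entered my life .",
    "I have been in pain since Harry entered my life .",
])
print(F.cosine_similarity(emb[0:1], emb[1:2]).item())
```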
|
finiteautomata/robertuitonews-tweetcontext | 021093b4a915b21027a079e7662e6194d4dd94b9 | 2021-11-15T14:58:55.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | finiteautomata | null | finiteautomata/robertuitonews-tweetcontext | 1 | null | transformers | 28,967 | Entry not found |
flavio-nakasato/berdou_500k | 67d4f6548207d7c1ba89a80d23ac5811e0ca01b7 | 2021-08-15T15:19:49.000Z | [
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | flavio-nakasato | null | flavio-nakasato/berdou_500k | 1 | null | transformers | 28,968 | MLM fine-tuned from Bertimbau-Base model on the Brazilian Federal Official Gazette (500k instances)
|
flavio-nakasato/deeppolicytracker_200k | c8c5844ccbdc80bf6b39dd44bb8523e3c8a4e546 | 2021-08-14T22:45:13.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | flavio-nakasato | null | flavio-nakasato/deeppolicytracker_200k | 1 | null | transformers | 28,969 | RoBERTa model pretrained on the Brazilian Federal Official Gazette (200k instances).
|
flavio-nakasato/roberdou_100k | 8d729c98f6080be2f92cbaac8526513fb2d59d19 | 2021-08-15T15:38:47.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | flavio-nakasato | null | flavio-nakasato/roberdou_100k | 1 | null | transformers | 28,970 | MLM fine-tuned from BR-BERTo model on the Brazilian Federal Official Gazette (100k instances)
|
flax-community/wav2vec2-dhivehi | a83fcbd6d1a8adfc10b83c48a326421d7d1f242e | 2021-07-19T09:40:30.000Z | [
"pytorch",
"jax",
"tensorboard",
"wav2vec2",
"pretraining",
"dv",
"dataset:common_voice",
"arxiv:2006.11477",
"transformers",
"automatic-speech-recognition"
] | automatic-speech-recognition | false | flax-community | null | flax-community/wav2vec2-dhivehi | 1 | null | transformers | 28,971 | ---
language: dv
tags:
- automatic-speech-recognition
datasets:
- common_voice
---
# Wav2Vec2 Dhivehi
Wav2Vec2 pretrained from scratch on the Common Voice Dhivehi dataset. The model was trained with Flax during the [Flax/Jax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104) organised by HuggingFace.
## Model description
The model used for training is [Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) by FacebookAI. It was introduced in the paper
"wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations" by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, and Michael Auli (https://arxiv.org/abs/2006.11477).
This model is available in the 🤗 [Model Hub](https://huggingface.co/facebook/wav2vec2-base-960h).
## Training data
Dhivehi data from [Common Voice](https://commonvoice.mozilla.org/en/datasets).
The dataset is also available in the 🤗 [Datasets](https://huggingface.co/datasets/common_voice) library.
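No usage snippet is provided here; since this is a pretraining-only checkpoint (no CTC head, so it cannot transcribe by itself), a minimal sketch for extracting speech representations could look like the following. The 16 kHz feature-extractor settings are assumptions based on the standard wav2vec 2.0 setup:
```python
import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

feature_extractor = Wav2Vec2FeatureExtractor(sampling_rate=16000, do_normalize=True)
model = Wav2Vec2Model.from_pretrained("flax-community/wav2vec2-dhivehi")

# the input should be 16 kHz mono audio; one second of random noise is used here for illustration
waveform = torch.randn(16000)
inputs = feature_extractor(waveform.numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state  # (1, num_frames, hidden_size)
print(hidden_states.shape)
```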
## Team members
- Shahu Kareem ([@shahukareem](https://huggingface.co/shahukareem))
- Eyna ([@eyna](https://huggingface.co/eyna))
|
flax-sentence-embeddings/all_datasets_v3_MiniLM-L12 | 894997f1d826887fa7c19a6194ab1f1c32e17d7a | 2021-07-23T15:37:42.000Z | [
"pytorch",
"bert",
"en",
"arxiv:2104.08727",
"arxiv:1810.09305",
"arxiv:2102.07033",
"arxiv:1904.06472",
"sentence-transformers",
"feature-extraction",
"sentence-similarity"
] | sentence-similarity | false | flax-sentence-embeddings | null | flax-sentence-embeddings/all_datasets_v3_MiniLM-L12 | 1 | 1 | sentence-transformers | 28,972 | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
language: en
---
# Model description
The project aims to train sentence embedding models on very large sentence level datasets using a self-supervised
contrastive learning objective. We used the pretrained [`MiniLM-L12`](https://huggingface.co/microsoft/MiniLM-L12-H384-uncased) model and fine-tuned in on a
1B sentence pairs dataset. We use a contrastive learning objective: given a sentence from the pair, the model should predict which out of a set of randomly sampled other sentences, was actually paired with it in our dataset.
We developped this model during the
[Community week using JAX/Flax for NLP & CV](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104),
organized by Hugging Face. We developped this model as part of the project:
[Train the Best Sentence Embedding Model Ever with 1B Training Pairs](https://discuss.huggingface.co/t/train-the-best-sentence-embedding-model-ever-with-1b-training-pairs/7354). We benefited from efficient hardware infrastructure to run the project: 7 TPUs v3-8, as well
as intervention from Google’s Flax, JAX, and Cloud team member about efficient deep learning frameworks.
## Intended uses
Our model is intended to be used as a sentence encoder. Given an input sentence, it outputs a vector which captures
the sentence's semantic information. The sentence vector may be used for information retrieval, clustering or sentence
similarity tasks.
## How to use
Here is how to use this model to get the features of a given text using [SentenceTransformers](https://github.com/UKPLab/sentence-transformers) library:
```python
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('flax-sentence-embeddings/all_datasets_v3_MiniLM-L12')
text = "Replace me by any text you'd like."
text_embbedding = model.encode(text)
# array([-0.01559514, 0.04046123, 0.1317083 , 0.00085931, 0.04585106,
# -0.05607086, 0.0138078 , 0.03569756, 0.01420381, 0.04266302 ...],
# dtype=float32)
```
# Training procedure
## Pre-training
We use the pretrained [`MiniLM-L12`](https://huggingface.co/microsoft/MiniLM-L12-H384-uncased). Please refer to the model
card for more detailed information about the pre-training procedure.
## Fine-tuning
We fine-tune the model using a contrastive objective. Formally, we compute the cosine similarity between each possible sentence pair in the batch.
We then apply the cross-entropy loss by comparing with the true pairs.
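A minimal PyTorch sketch of this in-batch objective is shown below; the similarity scale factor is an assumption for illustration, as the exact value used during training is not stated in this card:
```python
import torch
import torch.nn.functional as F

def in_batch_contrastive_loss(anchor_emb, positive_emb, scale=20.0):
    """anchor_emb, positive_emb: (batch_size, dim) embeddings of paired sentences."""
    # cosine similarity between every anchor and every candidate in the batch
    scores = F.cosine_similarity(anchor_emb.unsqueeze(1), positive_emb.unsqueeze(0), dim=-1) * scale
    # the true pair of anchor i is candidate i; all other candidates act as in-batch negatives
    labels = torch.arange(scores.size(0), device=scores.device)
    return F.cross_entropy(scores, labels)

# toy usage with random 384-dimensional embeddings (the MiniLM-L12 hidden size)
anchors, positives = torch.randn(8, 384), torch.randn(8, 384)
print(in_batch_contrastive_loss(anchors, positives))
```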
### Hyperparameters
We trained our model on a TPU v3-8. We trained the model for 540k steps using a batch size of 1024 (128 per TPU core).
We used a learning rate warm-up of 500 steps. The sequence length was limited to 128 tokens. We used the AdamW optimizer with
a 2e-5 learning rate. The full training script is accessible in this current repository.
### Training data
We use the concatenation from multiple datasets to fine-tune our model. The total number of sentence pairs is above 1 billion sentences.
We sampled each dataset given a weighted probability which configuration is detailed in the `data_config.json` file.
| Dataset | Paper | Number of training tuples |
|:--------------------------------------------------------:|:----------------------------------------:|:--------------------------:|
| [GOOAQ: Open Question Answering with Diverse Answer Types](https://github.com/allenai/gooaq) | [paper](https://arxiv.org/pdf/2104.08727.pdf) | 3,012,496 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_title_body_jsonl) | - | 364,001 |
| [Flickr 30k](https://shannon.cs.illinois.edu/DenotationGraph/) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/229/33) | 317,695 |
| [COCO 2020](COCO 2020) | [paper](https://link.springer.com/chapter/10.1007%2F978-3-319-10602-1_48) | 828,395|
| [Code Search](https://huggingface.co/datasets/code_search_net) | - | 1,151,414 |
| [TriviaqQA](https://huggingface.co/datasets/trivia_qa) | - | 73,346 |
| [SQuAD2.0](https://rajpurkar.github.io/SQuAD-explorer/) | [paper](https://aclanthology.org/P18-2124.pdf) | 87,599 |
| [Natural Questions (NQ)](https://ai.google.com/research/NaturalQuestions) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/1455) | 100,231 |
| [Simple Wikipedia](https://cs.pomona.edu/~dkauchak/simplification/) | [paper](https://www.aclweb.org/anthology/P11-2117/) | 102,225 |
| [Quora Question Pairs](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) | - | 103,663 |
| [Altlex](https://github.com/chridey/altlex/) | [paper](https://aclanthology.org/P16-1135.pdf) | 112,696 |
| [Wikihow](https://github.com/pvl/wikihow_pairs_dataset) | [paper](https://arxiv.org/abs/1810.09305) | 128,542 |
| [Sentence Compression](https://github.com/google-research-datasets/sentence-compression) | [paper](https://www.aclweb.org/anthology/D13-1155/) | 180,000 |
| AllNLI ([SNLI](https://nlp.stanford.edu/projects/snli/) and [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/)) | [paper SNLI](https://doi.org/10.18653/v1/d15-1075), [paper MultiNLI](https://doi.org/10.18653/v1/n18-1101) | 277,230 |
| [Eli5](https://huggingface.co/datasets/eli5) | [paper](https://doi.org/10.18653/v1/p19-1346) | 325,475 |
| [SPECTER](https://github.com/allenai/specter) | [paper](https://doi.org/10.18653/v1/2020.acl-main.207) | 684,100 |
| [S2ORC](https://github.com/allenai/s2orc) Title/Abstract | [paper](https://aclanthology.org/2020.acl-main.447/) | 41,769,185 |
| [S2ORC](https://github.com/allenai/s2orc) Citation/Citation | [paper](https://aclanthology.org/2020.acl-main.447/) | 52,603,982 |
| [S2ORC](https://github.com/allenai/s2orc) Citation/Abstract | [paper](https://aclanthology.org/2020.acl-main.447/) | 116,288,806 |
| [PAQ](https://github.com/facebookresearch/PAQ) | [paper](https://arxiv.org/abs/2102.07033) | 64,371,441 |
| [WikiAnswers](https://github.com/afader/oqa#wikianswers-corpus) | [paper](https://doi.org/10.1145/2623330.2623677) | 77,427,422 |
| SearchQA | - | 582,261 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) Title/Answer | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 1,198,260 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) Title/Question | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 659,896 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) Question/Answer | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 681,164 |
| [MS MARCO](https://microsoft.github.io/msmarco/) | [paper](https://doi.org/10.1145/3404835.3462804) | 9,144,553 |
| [Reddit conversational](https://github.com/PolyAI-LDN/conversational-datasets/tree/master/reddit) | [paper](https://arxiv.org/abs/1904.06472) | 726,484,430 |
| total | | 1,097,953,922 |
|
flboehm/reddit-bert-text_10 | 2e1e715bae122844ab660c932bafc0794d504754 | 2021-12-18T11:07:20.000Z | [
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | fill-mask | false | flboehm | null | flboehm/reddit-bert-text_10 | 1 | null | transformers | 28,973 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: reddit-bert-text_10
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# reddit-bert-text_10
This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.5198
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.9626 | 1.0 | 946 | 2.6163 |
| 2.6934 | 2.0 | 1892 | 2.5612 |
| 2.5971 | 3.0 | 2838 | 2.5023 |
### Framework versions
- Transformers 4.14.1
- Pytorch 1.10.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
|
formermagic/codet5-xl | d9a801b53edec073025eee569b04fb7ee0934878 | 2021-10-08T02:33:12.000Z | [
"pytorch",
"jax",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | formermagic | null | formermagic/codet5-xl | 1 | null | transformers | 28,974 | Entry not found |
formermagic/codet5x-base | 5e1ae641c53408d00e483e59a51f2ad5ed3576b2 | 2021-09-25T01:57:27.000Z | [
"pytorch",
"jax",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | formermagic | null | formermagic/codet5x-base | 1 | 1 | transformers | 28,975 | Entry not found |
frgfm/cspdarknet53_mish | 8b9f2c2cf2ec60ab73e7bc8b59a08067b44f52bd | 2022-07-20T00:57:54.000Z | [
"pytorch",
"dataset:frgfm/imagenette",
"arxiv:1911.11929",
"transformers",
"image-classification",
"license:apache-2.0"
] | image-classification | false | frgfm | null | frgfm/cspdarknet53_mish | 1 | null | transformers | 28,976 | ---
license: apache-2.0
tags:
- image-classification
- pytorch
datasets:
- frgfm/imagenette
---
# CSP-Darknet-53 Mish model
Pretrained on [ImageNette](https://github.com/fastai/imagenette). The CSP-Darknet-53 Mish architecture was introduced in [this paper](https://arxiv.org/pdf/1911.11929.pdf).
## Model description
The core idea of the authors is to modify the convolutional stages by adding cross-stage partial blocks to the architecture and to replace the activations with Mish.
## Installation
### Prerequisites
Python 3.6 (or higher) and [pip](https://pip.pypa.io/en/stable/)/[conda](https://docs.conda.io/en/latest/miniconda.html) are required to install Holocron.
### Latest stable release
You can install the last stable release of the package using [pypi](https://pypi.org/project/pylocron/) as follows:
```shell
pip install pylocron
```
or using [conda](https://anaconda.org/frgfm/pylocron):
```shell
conda install -c frgfm pylocron
```
### Developer mode
Alternatively, if you wish to use the latest features of the project that haven't made their way to a release yet, you can install the package from source *(install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) first)*:
```shell
git clone https://github.com/frgfm/Holocron.git
pip install -e Holocron/.
```
## Usage instructions
```python
import torch

from PIL import Image
from torchvision.transforms import Compose, ConvertImageDtype, Normalize, PILToTensor, Resize
from torchvision.transforms.functional import InterpolationMode
from holocron.models import model_from_hf_hub
model = model_from_hf_hub("frgfm/cspdarknet53_mish").eval()
img = Image.open(path_to_an_image).convert("RGB")
# Preprocessing
config = model.default_cfg
transform = Compose([
Resize(config['input_shape'][1:], interpolation=InterpolationMode.BILINEAR),
PILToTensor(),
ConvertImageDtype(torch.float32),
Normalize(config['mean'], config['std'])
])
input_tensor = transform(img).unsqueeze(0)
# Inference
with torch.inference_mode():
output = model(input_tensor)
probs = output.squeeze(0).softmax(dim=0)
```
## Citation
Original paper
```bibtex
@article{DBLP:journals/corr/abs-1911-11929,
author = {Chien{-}Yao Wang and
Hong{-}Yuan Mark Liao and
I{-}Hau Yeh and
Yueh{-}Hua Wu and
Ping{-}Yang Chen and
Jun{-}Wei Hsieh},
title = {CSPNet: {A} New Backbone that can Enhance Learning Capability of {CNN}},
journal = {CoRR},
volume = {abs/1911.11929},
year = {2019},
url = {http://arxiv.org/abs/1911.11929},
eprinttype = {arXiv},
eprint = {1911.11929},
timestamp = {Tue, 03 Dec 2019 20:41:07 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1911-11929.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
Source of this implementation
```bibtex
@software{Fernandez_Holocron_2020,
author = {Fernandez, François-Guillaume},
month = {5},
title = {{Holocron}},
url = {https://github.com/frgfm/Holocron},
year = {2020}
}
```
|
frtna/t5-small-finetuned-Spanish-to-Italian | 41d4b498e87e04ee5a3155a175088118f0b2e67c | 2021-12-27T06:01:32.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | frtna | null | frtna/t5-small-finetuned-Spanish-to-Italian | 1 | null | transformers | 28,977 | Entry not found |
frtna/ted_mt-Spanish-to-Italian | 3bd08be7e2cd98eaf40639a9d399a1a994ffb9a8 | 2022-03-28T22:04:21.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"dataset:new_dataset",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | frtna | null | frtna/ted_mt-Spanish-to-Italian | 1 | null | transformers | 28,978 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- new_dataset
model-index:
- name: ted_mt-Spanish-to-Italian
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# ted_mt-Spanish-to-Italian
This model is a fine-tuned version of [Helsinki-NLP/opus-mt-es-it](https://huggingface.co/Helsinki-NLP/opus-mt-es-it) on the new_dataset dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Sacrebleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:-------:|
| No log | 1.0 | 46 | 1.4873 | 29.6133 | 26.9081 |
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.11.6
|
furyhawk/t5-small-finetuned-bbc | 9dfd6be623b15dc939bec5393920024359b1dd58 | 2021-10-29T11:01:51.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | furyhawk | null | furyhawk/t5-small-finetuned-bbc | 1 | 1 | transformers | 28,979 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: t5-small-finetuned-bbc
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-bbc
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3238
- Rouge1: 21.2266
- Rouge2: 16.0927
- Rougel: 19.6785
- Rougelsum: 19.8849
- Gen Len: 19.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
| 0.4882 | 1.0 | 1001 | 0.3238 | 21.2266 | 16.0927 | 19.6785 | 19.8849 | 19.0 |
### Framework versions
- Transformers 4.12.0
- Pytorch 1.10.0
- Datasets 1.14.0
- Tokenizers 0.10.3
|
gabtan99/dialogpt-tagalog-medium | 5672eafc6e302639fa07b2c460e52a230e7ffeae | 2021-08-16T03:34:56.000Z | [
"pytorch",
"gpt2",
"text-generation",
"tl",
"dataset:gabtan99/pex-conversations",
"transformers",
"conversational",
"tagalog",
"filipino"
] | conversational | false | gabtan99 | null | gabtan99/dialogpt-tagalog-medium | 1 | null | transformers | 28,980 | ---
tags:
- conversational
- tagalog
- filipino
language:
- tl
inference: false
datasets:
- gabtan99/pex-conversations
---
# Tagalog DialoGPT
A DialoGPT-medium model fine-tuned on Tagalog conversational data scraped from the web. This model is an output of research on RoBERTa-based data augmentation for low-resource languages. This is the baseline model, which did not use any synthetic data during training.
# Latest release: July 25, 2021
* The model is currently only able to respond based on a history of up to 3 previous utterances, after which the context is truncated. This is a result of the scarce amount of Tagalog conversations in our dataset.
# Dataset
[PEx Conversations Dataset](https://huggingface.co/datasets/gabtan99/pex-conversations)
# Usage
Here is an example of using beam search for model inference.
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gabtan99/dialogpt-tagalog-medium")
model = AutoModelForCausalLM.from_pretrained("gabtan99/dialogpt-tagalog-medium")

chat_history_ids = None
for step in range(2):
# encode the new user input, add the eos_token and return a tensor in Pytorch
new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')
# append the new user input tokens to the chat history
bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids
# we limit the generation to 512 tokens, each utterance in training had a maximum of 128 tokens
chat_history_ids = model.generate(
bot_input_ids, max_length=512,
pad_token_id=tokenizer.eos_token_id,
num_beams=5,
no_repeat_ngram_size=3
)
    # pretty print last output tokens from bot
print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
```
# Training Script
[Fine-tuning script adapted from Spanish DialoGPT](https://colab.research.google.com/github/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb)
# Research by
* [tyadrianpaule](https://huggingface.co/tyadrianpaule)
* [schuylerng](https://huggingface.co/schuylerng)
* [dcl127](https://huggingface.co/dcl127) |
gaetangate/bart-large_genrl_simpleq | 4cd2026b087ee47dcbdd9360052cd8c6e385a2e2 | 2022-04-05T15:09:05.000Z | [
"pytorch",
"bart",
"text2text-generation",
"arxiv:2108.07337",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
] | text2text-generation | false | gaetangate | null | gaetangate/bart-large_genrl_simpleq | 1 | null | transformers | 28,981 | ---
license: apache-2.0
---
This model is used in the paper **Generative Relation Linking for Question Answering over Knowledge Bases**. [ArXiv](https://arxiv.org/abs/2108.07337), [GitHub](https://github.com/IBM/kbqa-relation-linking)
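The exact input serialization the model expects is defined in the linked GitHub repository, so the snippet below only shows how to load the checkpoint and run generation; the question string is purely illustrative and may not match the format used in the paper:
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("gaetangate/bart-large_genrl_simpleq")
model = AutoModelForSeq2SeqLM.from_pretrained("gaetangate/bart-large_genrl_simpleq")

inputs = tokenizer("who wrote the book the origin of species", return_tensors="pt")
outputs = model.generate(**inputs, num_beams=5, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```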
## Citation
```bibtex
@inproceedings{rossiello-genrl-2021,
title={Generative relation linking for question answering over knowledge bases},
author={Rossiello, Gaetano and Mihindukulasooriya, Nandana and Abdelaziz, Ibrahim and Bornea, Mihaela and Gliozzo, Alfio and Naseem, Tahira and Kapanipathi, Pavan},
booktitle={International Semantic Web Conference},
pages={321--337},
year={2021},
organization={Springer},
url = "https://link.springer.com/chapter/10.1007/978-3-030-88361-4_19",
doi = "10.1007/978-3-030-88361-4_19"
}
```
|
gagan3012/ViTGPT2I2A | 1a1ef7d31e41b1780a390910ec707ff35fe9e731 | 2022-02-08T03:27:44.000Z | [
"pytorch",
"vision-encoder-decoder",
"transformers",
"image-captioning",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | null | false | gagan3012 | null | gagan3012/ViTGPT2I2A | 1 | null | transformers | 28,982 | ---
license: apache-2.0
tags:
- image-captioning
- generated_from_trainer
model-index:
- name: ViTGPT2I2A
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# ViTGPT2I2A
This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the vizwiz dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0708
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- distributed_type: multi-GPU
- num_devices: 2
- total_train_batch_size: 4
- total_eval_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 0.1528 | 0.17 | 1000 | 0.0869 |
| 0.0899 | 0.34 | 2000 | 0.0817 |
| 0.084 | 0.51 | 3000 | 0.0790 |
| 0.0814 | 0.68 | 4000 | 0.0773 |
| 0.0803 | 0.85 | 5000 | 0.0757 |
| 0.077 | 1.02 | 6000 | 0.0745 |
| 0.0739 | 1.19 | 7000 | 0.0740 |
| 0.0719 | 1.37 | 8000 | 0.0737 |
| 0.0717 | 1.54 | 9000 | 0.0730 |
| 0.0731 | 1.71 | 10000 | 0.0727 |
| 0.0708 | 1.88 | 11000 | 0.0720 |
| 0.0697 | 2.05 | 12000 | 0.0717 |
| 0.0655 | 2.22 | 13000 | 0.0719 |
| 0.0653 | 2.39 | 14000 | 0.0719 |
| 0.0657 | 2.56 | 15000 | 0.0712 |
| 0.0663 | 2.73 | 16000 | 0.0710 |
| 0.0654 | 2.9 | 17000 | 0.0708 |
| 0.0645 | 3.07 | 18000 | 0.0716 |
| 0.0616 | 3.24 | 19000 | 0.0712 |
| 0.0607 | 3.41 | 20000 | 0.0712 |
| 0.0611 | 3.58 | 21000 | 0.0711 |
| 0.0615 | 3.76 | 22000 | 0.0711 |
| 0.0614 | 3.93 | 23000 | 0.0710 |
| 0.0594 | 4.1 | 24000 | 0.0716 |
| 0.0587 | 4.27 | 25000 | 0.0715 |
| 0.0574 | 4.44 | 26000 | 0.0715 |
| 0.0579 | 4.61 | 27000 | 0.0715 |
| 0.0581 | 4.78 | 28000 | 0.0715 |
| 0.0579 | 4.95 | 29000 | 0.0715 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.2+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
|
gagan3012/ViTGPT2_VW | 93c31484c3072fa3b86c486ede238e59bc44d847 | 2022-02-07T18:35:06.000Z | [
"pytorch",
"vision-encoder-decoder",
"transformers",
"generated_from_trainer",
"model-index"
] | null | false | gagan3012 | null | gagan3012/ViTGPT2_VW | 1 | null | transformers | 28,983 | ---
tags:
- generated_from_trainer
model-index:
- name: ViTGPT2_VW
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# ViTGPT2_VW
This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0771
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- distributed_type: multi-GPU
- num_devices: 2
- total_train_batch_size: 4
- total_eval_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 0.1256 | 0.03 | 1000 | 0.0928 |
| 0.0947 | 0.07 | 2000 | 0.0897 |
| 0.0889 | 0.1 | 3000 | 0.0859 |
| 0.0888 | 0.14 | 4000 | 0.0842 |
| 0.0866 | 0.17 | 5000 | 0.0831 |
| 0.0852 | 0.2 | 6000 | 0.0819 |
| 0.0833 | 0.24 | 7000 | 0.0810 |
| 0.0835 | 0.27 | 8000 | 0.0802 |
| 0.081 | 0.31 | 9000 | 0.0796 |
| 0.0803 | 0.34 | 10000 | 0.0789 |
| 0.0814 | 0.38 | 11000 | 0.0785 |
| 0.0799 | 0.41 | 12000 | 0.0780 |
| 0.0786 | 0.44 | 13000 | 0.0776 |
| 0.0796 | 0.48 | 14000 | 0.0771 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.2+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
|
gagan3012/model | 753663868669dd0f630b127f783dcf19555c6962 | 2021-10-18T18:23:58.000Z | [
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-generation | false | gagan3012 | null | gagan3012/model | 1 | null | transformers | 28,984 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: model
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# model
This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 3.6250
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
### Framework versions
- Transformers 4.12.0.dev0
- Pytorch 1.9.0+cu111
- Datasets 1.13.3
- Tokenizers 0.10.3
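## Example usage
No usage example is provided above. Since this is a distilgpt2 fine-tune, a standard text-generation pipeline call should work; the sketch below is illustrative only and the prompt is a placeholder.
```python
from transformers import pipeline

# Load the fine-tuned distilgpt2 checkpoint and generate a short continuation.
generator = pipeline("text-generation", model="gagan3012/model")
print(generator("Once upon a time", max_length=50, num_return_sequences=1)[0]["generated_text"])
```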
|
gagan3012/project-code-py-neo | e28fae47af3aa731003fa615abff579bf53207c0 | 2021-05-25T07:32:07.000Z | [
"pytorch",
"gpt_neo",
"text-generation",
"transformers"
] | text-generation | false | gagan3012 | null | gagan3012/project-code-py-neo | 1 | null | transformers | 28,985 | Entry not found |
gagan3012/summarsiation | 57ef8e80d73aa6022af0bcb57b0143b65b38d4b1 | 2021-08-17T17:17:30.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | gagan3012 | null | gagan3012/summarsiation | 1 | null | transformers | 28,986 | ---
Summarisation model summarsiation |
gagan3012/wav2vec2-large-xls-r-300m-hindi | afe0d6fc271251aa848656c10ef5891795ec1ffe | 2022-01-28T18:47:50.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | gagan3012 | null | gagan3012/wav2vec2-large-xls-r-300m-hindi | 1 | null | transformers | 28,987 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xls-r-300m-hindi
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-hindi
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.2.dev0
- Tokenizers 0.11.0
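## Example usage
The card does not show inference code. The sketch below is a minimal, unofficial example using the high-level ASR pipeline; it assumes the repository contains the fine-tuned weights plus a matching processor, and the audio file name is a placeholder (speech input should be 16 kHz).
```python
from transformers import pipeline

# Transcribe a local Hindi audio clip; the file name is a placeholder.
asr = pipeline("automatic-speech-recognition", model="gagan3012/wav2vec2-large-xls-r-300m-hindi")
print(asr("sample_hindi_clip.wav")["text"])
```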
|
gagan3012/wav2vec2-large-xls-r-hindi | f9e42271c2adb182ea2d69220cd54e667109e525 | 2022-01-28T19:14:14.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | gagan3012 | null | gagan3012/wav2vec2-large-xls-r-hindi | 1 | null | transformers | 28,988 | Entry not found |
gagan3012/xls-r-300m-ta | c7da3e627301bc45e971d426126f3ad4e20adaf3 | 2022-01-31T20:29:33.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | gagan3012 | null | gagan3012/xls-r-300m-ta | 1 | null | transformers | 28,989 | Entry not found |
gaotianyu1350/sup-simcse-bert-large-uncased | 1002873deea93828394ca1034dc34e75ded25fa9 | 2021-05-19T17:06:11.000Z | [
"pytorch",
"jax",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | gaotianyu1350 | null | gaotianyu1350/sup-simcse-bert-large-uncased | 1 | null | transformers | 28,990 | Entry not found |
gaotianyu1350/sup-simcse-roberta-base | 19d5c7f7fcea71d06c80689e8332f7724096f876 | 2021-05-20T16:21:48.000Z | [
"pytorch",
"jax",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | gaotianyu1350 | null | gaotianyu1350/sup-simcse-roberta-base | 1 | null | transformers | 28,991 | Entry not found |
gayanin/t5-small-mlm-pubmed-45 | 1bf8424db32fdf58bf21b8666b19f20f906df1db | 2021-11-22T23:47:01.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | gayanin | null | gayanin/t5-small-mlm-pubmed-45 | 1 | null | transformers | 28,992 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: t5-small-mlm-pubmed-45
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-mlm-pubmed-45
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.6395
- Rouge2 Precision: 0.3383
- Rouge2 Recall: 0.2424
- Rouge2 Fmeasure: 0.2753
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge2 Precision | Rouge2 Recall | Rouge2 Fmeasure |
|:-------------:|:-----:|:----:|:---------------:|:----------------:|:-------------:|:---------------:|
| 2.519 | 0.75 | 500 | 1.9659 | 0.3178 | 0.1888 | 0.2299 |
| 2.169 | 1.51 | 1000 | 1.8450 | 0.3256 | 0.2138 | 0.25 |
| 2.0796 | 2.26 | 1500 | 1.7900 | 0.3368 | 0.2265 | 0.2636 |
| 1.9978 | 3.02 | 2000 | 1.7553 | 0.3427 | 0.234 | 0.2709 |
| 1.9686 | 3.77 | 2500 | 1.7172 | 0.3356 | 0.2347 | 0.2692 |
| 1.9142 | 4.52 | 3000 | 1.6986 | 0.3358 | 0.238 | 0.2715 |
| 1.921 | 5.28 | 3500 | 1.6770 | 0.3349 | 0.2379 | 0.2709 |
| 1.8848 | 6.03 | 4000 | 1.6683 | 0.3346 | 0.2379 | 0.2708 |
| 1.8674 | 6.79 | 4500 | 1.6606 | 0.3388 | 0.2419 | 0.2752 |
| 1.8606 | 7.54 | 5000 | 1.6514 | 0.3379 | 0.2409 | 0.274 |
| 1.8515 | 8.3 | 5500 | 1.6438 | 0.3356 | 0.2407 | 0.2731 |
| 1.8403 | 9.05 | 6000 | 1.6401 | 0.3367 | 0.2421 | 0.2744 |
| 1.8411 | 9.8 | 6500 | 1.6395 | 0.3383 | 0.2424 | 0.2753 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
|
gayanin/t5-small-mlm-pubmed | d33459f72434ea4099ba0f4b31a0afd832a6e041 | 2021-11-08T17:26:42.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | gayanin | null | gayanin/t5-small-mlm-pubmed | 1 | null | transformers | 28,993 | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: t5-small-mlm-pubmed
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-mlm-pubmed
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8008
- Rouge2 Precision: 0.6071
- Rouge2 Recall: 0.4566
- Rouge2 Fmeasure: 0.5079
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 40
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge2 Precision | Rouge2 Recall | Rouge2 Fmeasure |
|:-------------:|:-----:|:-----:|:---------------:|:----------------:|:-------------:|:---------------:|
| 0.914 | 0.75 | 500 | 0.8691 | 0.5901 | 0.4357 | 0.4879 |
| 0.9093 | 1.51 | 1000 | 0.8646 | 0.5867 | 0.4372 | 0.488 |
| 0.895 | 2.26 | 1500 | 0.8618 | 0.5891 | 0.4387 | 0.49 |
| 0.8842 | 3.02 | 2000 | 0.8571 | 0.5899 | 0.4374 | 0.4891 |
| 0.8796 | 3.77 | 2500 | 0.8544 | 0.5903 | 0.4406 | 0.4916 |
| 0.8759 | 4.52 | 3000 | 0.8513 | 0.5921 | 0.4395 | 0.4912 |
| 0.8621 | 5.28 | 3500 | 0.8485 | 0.5934 | 0.4413 | 0.493 |
| 0.8613 | 6.03 | 4000 | 0.8442 | 0.5944 | 0.4428 | 0.4944 |
| 0.8537 | 6.79 | 4500 | 0.8406 | 0.594 | 0.4414 | 0.4932 |
| 0.8518 | 7.54 | 5000 | 0.8399 | 0.5956 | 0.4424 | 0.4945 |
| 0.8438 | 8.3 | 5500 | 0.8365 | 0.5953 | 0.4452 | 0.4964 |
| 0.8339 | 9.05 | 6000 | 0.8353 | 0.5983 | 0.4468 | 0.4983 |
| 0.8307 | 9.8 | 6500 | 0.8331 | 0.5979 | 0.4461 | 0.4976 |
| 0.8328 | 10.56 | 7000 | 0.8304 | 0.5975 | 0.4465 | 0.4979 |
| 0.8263 | 11.31 | 7500 | 0.8283 | 0.5977 | 0.4467 | 0.4981 |
| 0.8168 | 12.07 | 8000 | 0.8267 | 0.5971 | 0.4463 | 0.4976 |
| 0.8165 | 12.82 | 8500 | 0.8248 | 0.5969 | 0.4462 | 0.4976 |
| 0.8084 | 13.57 | 9000 | 0.8245 | 0.6018 | 0.4527 | 0.5035 |
| 0.8136 | 14.33 | 9500 | 0.8219 | 0.6023 | 0.4509 | 0.5023 |
| 0.8073 | 15.08 | 10000 | 0.8206 | 0.6002 | 0.4486 | 0.5001 |
| 0.808 | 15.84 | 10500 | 0.8185 | 0.6009 | 0.4506 | 0.5019 |
| 0.8027 | 16.59 | 11000 | 0.8173 | 0.5978 | 0.4478 | 0.4989 |
| 0.8061 | 17.35 | 11500 | 0.8169 | 0.6022 | 0.4513 | 0.5026 |
| 0.7922 | 18.1 | 12000 | 0.8152 | 0.6016 | 0.4501 | 0.5016 |
| 0.7928 | 18.85 | 12500 | 0.8141 | 0.6009 | 0.45 | 0.5012 |
| 0.7909 | 19.61 | 13000 | 0.8143 | 0.6019 | 0.4521 | 0.5028 |
| 0.7909 | 20.36 | 13500 | 0.8115 | 0.5997 | 0.4505 | 0.5011 |
| 0.7949 | 21.12 | 14000 | 0.8115 | 0.6043 | 0.4536 | 0.5048 |
| 0.7853 | 21.87 | 14500 | 0.8095 | 0.6033 | 0.4527 | 0.5038 |
| 0.7819 | 22.62 | 15000 | 0.8095 | 0.6054 | 0.4541 | 0.5056 |
| 0.7828 | 23.38 | 15500 | 0.8075 | 0.6036 | 0.453 | 0.5042 |
| 0.787 | 24.13 | 16000 | 0.8068 | 0.6031 | 0.4528 | 0.504 |
| 0.7739 | 24.89 | 16500 | 0.8072 | 0.6043 | 0.4529 | 0.5045 |
| 0.7782 | 25.64 | 17000 | 0.8073 | 0.606 | 0.4551 | 0.5063 |
| 0.7772 | 26.4 | 17500 | 0.8063 | 0.6055 | 0.4549 | 0.5062 |
| 0.7718 | 27.15 | 18000 | 0.8057 | 0.606 | 0.4546 | 0.5059 |
| 0.7747 | 27.9 | 18500 | 0.8045 | 0.6046 | 0.4543 | 0.5054 |
| 0.7738 | 28.66 | 19000 | 0.8035 | 0.6059 | 0.4549 | 0.506 |
| 0.7642 | 29.41 | 19500 | 0.8041 | 0.6053 | 0.4545 | 0.5058 |
| 0.7666 | 30.17 | 20000 | 0.8039 | 0.6066 | 0.457 | 0.508 |
| 0.7686 | 30.92 | 20500 | 0.8027 | 0.6075 | 0.4571 | 0.5081 |
| 0.7664 | 31.67 | 21000 | 0.8026 | 0.6062 | 0.4566 | 0.5076 |
| 0.77 | 32.43 | 21500 | 0.8022 | 0.6068 | 0.4571 | 0.5081 |
| 0.7618 | 33.18 | 22000 | 0.8015 | 0.6065 | 0.4563 | 0.5072 |
| 0.7615 | 33.94 | 22500 | 0.8013 | 0.6064 | 0.4565 | 0.5074 |
| 0.7611 | 34.69 | 23000 | 0.8017 | 0.607 | 0.4567 | 0.5078 |
| 0.7611 | 35.44 | 23500 | 0.8013 | 0.608 | 0.4565 | 0.5082 |
| 0.7604 | 36.2 | 24000 | 0.8012 | 0.6069 | 0.4561 | 0.5072 |
| 0.7599 | 36.95 | 24500 | 0.8013 | 0.6078 | 0.4571 | 0.5085 |
| 0.7542 | 37.71 | 25000 | 0.8016 | 0.6083 | 0.4579 | 0.5091 |
| 0.7637 | 38.46 | 25500 | 0.8009 | 0.6072 | 0.4569 | 0.5081 |
| 0.7596 | 39.22 | 26000 | 0.8008 | 0.6069 | 0.4566 | 0.5078 |
| 0.7604 | 39.97 | 26500 | 0.8008 | 0.6071 | 0.4566 | 0.5079 |
### Framework versions
- Transformers 4.12.3
- Pytorch 1.9.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
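## Example usage
The card reports ROUGE scores but no inference snippet. Assuming the model was trained with T5's sentinel-token denoising objective (suggested by the `mlm` name, not confirmed by the card), a span-filling call might look like the sketch below; the sentence is illustrative only.
```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("gayanin/t5-small-mlm-pubmed")
model = T5ForConditionalGeneration.from_pretrained("gayanin/t5-small-mlm-pubmed")

# <extra_id_0>/<extra_id_1> are T5's standard sentinel tokens for masked spans.
text = "The patient was treated with <extra_id_0> for chronic <extra_id_1>."
input_ids = tokenizer(text, return_tensors="pt").input_ids
output_ids = model.generate(input_ids, max_length=32, num_beams=4)
# Keep special tokens so the predicted spans stay aligned with their sentinels.
print(tokenizer.decode(output_ids[0], skip_special_tokens=False))
```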
|
gchhablani/wav2vec2-large-xlsr-cnh | 90564648a748d0ee77977477f5e0a1eb02e7aff0 | 2021-07-06T04:25:40.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"cnh",
"dataset:common_voice",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | gchhablani | null | gchhablani/wav2vec2-large-xlsr-cnh | 1 | null | transformers | 28,994 | ---
language: cnh
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: Wav2Vec2 Large 53 Hakha Chin by Gunjan Chhablani
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice cnh
type: common_voice
args: cnh
metrics:
- name: Test WER
type: wer
value: 31.38
---
# Wav2Vec2-Large-XLSR-53-Hakha-Chin
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Hakha Chin using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "cnh", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-cnh")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-cnh/")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Hakha Chin test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "cnh", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-cnh")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-cnh")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\/]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 31.38 %
## Training
The Common Voice `train` and `validation` datasets were used for training. The script used for training can be found [here](https://colab.research.google.com/drive/1pejk9gv9vMcUOjyVQ_vsV2ngW4NiWLWy?usp=sharing). |
gchhablani/wav2vec2-large-xlsr-gu | f89ae9d57db3d2c8dfbc2465f1a02ad41071b548 | 2021-07-06T04:38:17.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"gu",
"dataset:openslr",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | gchhablani | null | gchhablani/wav2vec2-large-xlsr-gu | 1 | null | transformers | 28,995 | ---
language: gu
datasets:
- openslr
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Large 53 Gujarati by Gunjan Chhablani
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: OpenSLR gu
type: openslr
metrics:
- name: Test WER
type: wer
value: 23.55
---
# Wav2Vec2-Large-XLSR-53-Gujarati
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Gujarati using the [OpenSLR SLR78](http://openslr.org/78/) dataset. When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows, assuming you have a dataset with Gujarati `sentence` and `path` fields:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
# test_dataset = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET.
# For sample see the Colab link in Training Section.
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
resampler = torchaudio.transforms.Resample(48_000, 16_000) # The original data has a 48,000 Hz sampling rate. You can change it according to your input.
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset_eval["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on 10% of the Gujarati data on OpenSLR.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
# test_dataset = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET. For sample see the Colab link in Training Section.
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\–\…\'\_\’]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"),
attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 23.55 %
## Training
90% of the OpenSLR Gujarati Male+Female dataset was used for training, after removing a few examples that contained Roman characters.
The colab notebook used for training can be found [here](https://colab.research.google.com/drive/1fRQlgl4EPR4qKGScgza3MpWgbL5BeWtn?usp=sharing).
|
gchhablani/wav2vec2-large-xlsr-hu | 1c5340192b30f8644bbf2b3c9b6e7060d844057e | 2021-07-06T04:43:55.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"hu",
"dataset:common_voice",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | gchhablani | null | gchhablani/wav2vec2-large-xlsr-hu | 1 | null | transformers | 28,996 | ---
language: hu
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: Wav2Vec2 Large 53 Hungarian by Gunjan Chhablani
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice hu
type: common_voice
args: hu
metrics:
- name: Test WER
type: wer
value: 46.75
---
# Wav2Vec2-Large-XLSR-53-Hungarian
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Hungarian using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "hu", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-hu")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-hu")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Hungarian test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "hu", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-hu")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-hu")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\–\…]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 46.75 %
## Training
The Common Voice `train` and `validation` datasets were used for training. The code can be found [here](https://github.com/gchhablani/wav2vec2-week/blob/main/fine-tune-xlsr-wav2vec2-on-hungarian-asr.ipynb). The notebook containing the code used for evaluation can be found [here](https://colab.research.google.com/drive/1esYvWS6IkTQFfRqi_b6lAJEycuecInHE?usp=sharing). |
gchhablani/wav2vec2-large-xlsr-ia | 0050eb7579ae8662b329a1dd631e8a692a377bc9 | 2021-07-06T04:50:49.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"ia",
"dataset:common_voice",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | gchhablani | null | gchhablani/wav2vec2-large-xlsr-ia | 1 | null | transformers | 28,997 | ---
language: ia
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Large 53 Interlingua by Gunjan Chhablani
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice ia
type: common_voice
args: ia
metrics:
- name: Test WER
type: wer
value: 25.09
---
# Wav2Vec2-Large-XLSR-53-Interlingua
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Interlingua using the [Common Voice](https://huggingface.co/datasets/common_voice).
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "ia", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-ia")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-ia")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Interlingua test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "ia", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-ia")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-ia")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\']'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 25.09 %
## Training
The Common Voice `train` and `validation` datasets were used for training, but only for 4000 steps because of a GPU timeout. The results above are based on the 4000-step checkpoint; full training would likely improve them.
The colab notebook used can be found [here](https://colab.research.google.com/drive/1nbqvVwS8DTNrCzzh3vgrN55qxgoqbita?usp=sharing) and the evaluation can be found [here](https://colab.research.google.com/drive/18pCWBwNNUMUYV1FiqT_0EsTbCfwwe7ms?usp=sharing). |
gchhablani/wav2vec2-large-xlsr-mr-2 | 4ec7a5c7cc3738e4d005470df63e0dc3a27fd78f | 2021-07-06T04:59:33.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"mr",
"dataset:interspeech_2021_asr",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | gchhablani | null | gchhablani/wav2vec2-large-xlsr-mr-2 | 1 | null | transformers | 28,998 | ---
language: mr
datasets:
- interspeech_2021_asr
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Large 53 Marathi 2 by Gunjan Chhablani
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: InterSpeech 2021 ASR mr
type: interspeech_2021_asr
metrics:
- name: Test WER
type: wer
value: 14.53
---
# Wav2Vec2-Large-XLSR-53-Marathi
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Marathi using a part of the [InterSpeech 2021 Marathi](https://navana-tech.github.io/IS21SS-indicASRchallenge/data.html) dataset. When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows, assuming you have a dataset with Marathi `sentence` and `path` fields:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
# test_dataset = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET. For sample see the Colab link in Training Section.
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-2")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-2")
resampler = torchaudio.transforms.Resample(8_000, 16_000) # The original data has an 8,000 Hz sampling rate. You can change it according to your input.
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the test set of the Marathi data on InterSpeech-2021.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
# test_dataset = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET. For sample see the Colab link in Training Section.
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-2")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-2")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\'\�]'
resampler = torchaudio.transforms.Resample(8_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"),
attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 19.98 % (555 examples from test set were used for evaluation)
**Test Result on 10% of OpenSLR74 data**: 64.64 %
## Training
5000 examples of the InterSpeech Marathi dataset were used for training.
The colab notebook used for training can be found [here](https://colab.research.google.com/drive/1sIwGOLJPQqhKm_wVZDkzRuoJqAEgArFr?usp=sharing).
|
gchhablani/wav2vec2-large-xlsr-mr-3 | 189233a8d6faa0983d91c816f84c73459296046d | 2021-07-06T05:05:54.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"mr",
"dataset:openslr",
"dataset:interspeech_2021_asr",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | gchhablani | null | gchhablani/wav2vec2-large-xlsr-mr-3 | 1 | null | transformers | 28,999 | ---
language: mr
datasets:
- openslr
- interspeech_2021_asr
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Large 53 Marathi by Gunjan Chhablani
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: OpenSLR mr, InterSpeech 2021 ASR mr
type: openslr, interspeech_2021_asr
metrics:
- name: Test WER
type: wer
value: 19.05
---
# Wav2Vec2-Large-XLSR-53-Marathi
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Marathi using the [OpenSLR SLR64](http://openslr.org/64/) and [InterSpeech 2021](https://navana-tech.github.io/IS21SS-indicASRchallenge/data.html) Marathi datasets. Note that the OpenSLR data contains only female voices; please keep this in mind before using the model for your task. When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows, assuming you have a dataset with Marathi `text` and `audio_path` fields:
```python
import torch
import torchaudio
import librosa
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
# test_data = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET. For sample see the Colab link in Training Section.
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-3")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-3")
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["audio_path"])
batch["speech"] = librosa.resample(speech_array[0].numpy(), sampling_rate, 16_000) # sampling_rate can vary
return batch
test_data= test_data.map(speech_file_to_array_fn)
inputs = processor(test_data["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_data["text"][:2])
```
## Evaluation
The model can be evaluated as follows on the held-out Marathi test examples from OpenSLR and InterSpeech 2021 (157 examples from each).
```python
import torch
import torchaudio
import librosa
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
# test_data = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET. For sample see the Colab link in Training Section.
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-3")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-3")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\–\…]'
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["text"] = re.sub(chars_to_ignore_regex, '', batch["text"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["audio_path"])
batch["speech"] = librosa.resample(speech_array[0].numpy(), sampling_rate, 16_000)
return batch
test_data= test_data.map(speech_file_to_array_fn)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_data.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["text"])))
```
**Test Result**: 19.05 % (157+157 examples)
**Test Result on OpenSLR test**: 14.15 % (157 examples)
**Test Results on InterSpeech test**: 27.14 % (157 examples)
## Training
1412 examples of the OpenSLR Marathi dataset and 1412 examples of InterSpeech 2021 Marathi ASR dataset were used for training. For testing, 157 examples from each were used.
The colab notebook used for training and evaluation can be found [here](https://colab.research.google.com/drive/15fUhb4bUFFGJyNLr-_alvPxVX4w0YXRu?usp=sharing).
|