modelId | sha | lastModified | tags | pipeline_tag | private | author | config | id | downloads | likes | library_name | __index_level_0__ | readme |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Shubham-Kumar-DTU/DialoGPT-small-goku | 1bba2e9f223eddaf08536447fa51d3e05077fb0c | 2021-09-01T21:40:10.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Shubham-Kumar-DTU | null | Shubham-Kumar-DTU/DialoGPT-small-goku | 1 | null | transformers | 28,400 | ---
tags:
- conversational
---
# Goku DialoGPT Model
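A minimal chat sketch with `transformers` (the prompt text is illustrative, not from the original card):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Shubham-Kumar-DTU/DialoGPT-small-goku")
model = AutoModelForCausalLM.from_pretrained("Shubham-Kumar-DTU/DialoGPT-small-goku")

# Encode the user message plus the end-of-sequence token
input_ids = tokenizer.encode("Hi Goku, how are you?" + tokenizer.eos_token, return_tensors="pt")

# Generate a reply and decode only the newly generated tokens
reply_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(reply_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True))
```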
|
Shushant/ContaminationQuestionAnsweringTry | bcc3fe46dd97ba694c0987cf53a6e64de0cb7169 | 2022-01-13T07:14:28.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Shushant | null | Shushant/ContaminationQuestionAnsweringTry | 1 | null | transformers | 28,401 | Entry not found |
Sid51/CB | 3d8ccd8113e57b24117fd7480f9c0ceb98ff328e | 2021-06-12T17:36:59.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Sid51 | null | Sid51/CB | 1 | null | transformers | 28,402 | Entry not found |
Simovod/testSIM | 653a5b901245118117208561b0117ae1793c9e09 | 2021-08-03T09:56:11.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Simovod | null | Simovod/testSIM | 1 | null | transformers | 28,403 | Entry not found |
Sired/DialoGPT-small-trumpbot | a685a7c3c97e71a4359b3fc1848ba7f064a0d0ea | 2021-11-09T22:36:28.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Sired | null | Sired/DialoGPT-small-trumpbot | 1 | null | transformers | 28,404 | ---
tags:
- conversational
---
# Trump Insults GPT Bot |
Snaky/StupidEdwin | e1426bcb8257f12d3c87e4a1012adb77cfb89e95 | 2021-11-14T15:12:52.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Snaky | null | Snaky/StupidEdwin | 1 | null | transformers | 28,405 | ---
tags:
- conversational
---
# StupidEdwin |
Sonny/dummy-model | 4bf400edc0e6cdd30e7523968bc06cab51418a70 | 2022-01-19T21:55:22.000Z | [
"pytorch",
"camembert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Sonny | null | Sonny/dummy-model | 1 | null | transformers | 28,406 | Entry not found |
Soonhwan-Kwon/xlm-roberta-xxlarge | 90febdb12bb980657f5ae449fa97a494c9aa2c76 | 2021-11-14T09:54:29.000Z | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Soonhwan-Kwon | null | Soonhwan-Kwon/xlm-roberta-xxlarge | 1 | null | transformers | 28,407 | Entry not found |
SophieTr/results | 62c52cf9da1ebbe7339105a619595520b52854ce | 2021-12-28T19:59:38.000Z | [
"pytorch",
"pegasus",
"text2text-generation",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | text2text-generation | false | SophieTr | null | SophieTr/results | 1 | 2 | transformers | 28,408 | ---
tags:
- generated_from_trainer
model-index:
- name: results
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# results
This model is a fine-tuned version of [sshleifer/distill-pegasus-xsum-16-4](https://huggingface.co/sshleifer/distill-pegasus-xsum-16-4) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.4473
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 10
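For orientation, a hedged sketch of how these settings could be expressed with the `transformers` Seq2Seq trainer API (`output_dir` is a placeholder; the Adam betas and epsilon listed above are the library defaults):
```python
from transformers import Seq2SeqTrainingArguments

# Mirrors the reported hyperparameters; not the original training script.
args = Seq2SeqTrainingArguments(
    output_dir="results",           # placeholder
    learning_rate=5e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=10,
)
```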
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 7.2378 | 0.51 | 100 | 7.1853 |
| 7.2309 | 1.01 | 200 | 6.6342 |
| 6.4796 | 1.52 | 300 | 6.3206 |
| 6.2691 | 2.02 | 400 | 6.0184 |
| 5.7382 | 2.53 | 500 | 5.5754 |
| 4.9922 | 3.03 | 600 | 4.5178 |
| 3.6031 | 3.54 | 700 | 2.8579 |
| 2.5203 | 4.04 | 800 | 2.4718 |
| 2.2563 | 4.55 | 900 | 2.4128 |
| 2.1425 | 5.05 | 1000 | 2.3767 |
| 2.004 | 5.56 | 1100 | 2.3982 |
| 2.0437 | 6.06 | 1200 | 2.3787 |
| 1.9407 | 6.57 | 1300 | 2.3952 |
| 1.9194 | 7.07 | 1400 | 2.3964 |
| 1.758 | 7.58 | 1500 | 2.4056 |
| 1.918 | 8.08 | 1600 | 2.4101 |
| 1.9162 | 8.59 | 1700 | 2.4085 |
| 1.8983 | 9.09 | 1800 | 2.4058 |
| 1.6939 | 9.6 | 1900 | 2.4050 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
|
SouvikGhosh/DialoGPT-Souvik | e05834dcee6b05ae2fa2453940dcd74a979a128a | 2021-06-21T13:00:49.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | SouvikGhosh | null | SouvikGhosh/DialoGPT-Souvik | 1 | null | transformers | 28,409 | Entry not found |
Splend1dchan/phoneme-bart-base | 8b6df48e9081fcb3b43655bf9fbecc10bb9cad29 | 2022-02-21T18:15:19.000Z | [
"pytorch",
"bart",
"feature-extraction",
"transformers"
] | feature-extraction | false | Splend1dchan | null | Splend1dchan/phoneme-bart-base | 1 | null | transformers | 28,410 | Entry not found |
Spoon/DialoGPT-small-engineer | 5465b404065771376952cf32f75afb1c5752d8c4 | 2021-09-13T15:38:07.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Spoon | null | Spoon/DialoGPT-small-engineer | 1 | null | transformers | 28,411 | ---
tags:
- conversational
---
# Engineer DialoGPT Model |
Srulikbdd/Wav2Vec2-large-xlsr-welsh | 39c6c896a05271ca7b012fccfe8afcae245de172 | 2021-07-05T17:38:11.000Z | [
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"sv",
"transformers",
"audio",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0",
"model-index"
] | automatic-speech-recognition | false | Srulikbdd | null | Srulikbdd/Wav2Vec2-large-xlsr-welsh | 1 | null | transformers | 28,412 | ---
language: cy
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Welsh by Srulik Ben David
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice cy
type: common_voice
args: cy
metrics:
- name: Test WER
type: wer
value: 29.4
---
# Wav2Vec2-Large-XLSR-Welsh
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the Welsh Common Voice dataset.
The data was augmented using a standard augmentation approach.
When using this model, make sure that your speech input is sampled at 16 kHz.
Test result (WER): 29.4%
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "cy", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("Srulikbdd/Wav2Vec2-large-xlsr-welsh")
model = Wav2Vec2ForCTC.from_pretrained("Srulikbdd/Wav2Vec2-large-xlsr-welsh")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Welsh test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "cy", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("Srulikbdd/Wav2Vec2-large-xlsr-welsh")
model = Wav2Vec2ForCTC.from_pretrained("Srulikbdd/Wav2Vec2-large-xlsr-welsh")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\u2013\u2014\;\:\"\\\\\%\u2018\u2019\u201c\u201d]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run batched inference on the test set
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
``` |
StephennFernandes/wav2vec2-XLS-R-300m-assamese | 5f7c476deab069103ac89b35126d83eda2224ba3 | 2022-02-08T18:26:31.000Z | [
"pytorch",
"wav2vec2",
"feature-extraction",
"transformers"
] | feature-extraction | false | StephennFernandes | null | StephennFernandes/wav2vec2-XLS-R-300m-assamese | 1 | null | transformers | 28,413 | Entry not found |
SteveC/sdc_bot_15K | b784e301338acaddf38c7bf08f5426c35d5bc0e2 | 2022-02-21T02:04:26.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | SteveC | null | SteveC/sdc_bot_15K | 1 | null | transformers | 28,414 | It's just a dialog bot trained on my Tweets. Unfortunately, as tweets aren't very conversational, it comes off as pretty random. |
SteveC/sdc_bot_medium | 8416cf84aa0d78ec2e475bfcc8c496e04ab0c8c8 | 2022-02-11T16:05:17.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | SteveC | null | SteveC/sdc_bot_medium | 1 | 1 | transformers | 28,415 | Entry not found |
StevenShoemakerNLP/pitchfork | 0ad2a7f1d265cddec8d72ed99e5e1d856cb88bc6 | 2021-05-21T11:15:10.000Z | [
"pytorch",
"jax",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | StevenShoemakerNLP | null | StevenShoemakerNLP/pitchfork | 1 | null | transformers | 28,416 | Entry not found |
StormZJ/test1 | 433a185af655878ed6841825fe7aede850524e88 | 2022-02-11T05:54:35.000Z | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | StormZJ | null | StormZJ/test1 | 1 | null | transformers | 28,417 | Entry not found |
Subhashini17/wav2vec2-large-xls-r-300m-ta-colab-new | 07f13bfc8939c4be9e06bd784d630dbf6050a2bb | 2022-02-04T08:22:17.000Z | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Subhashini17 | null | Subhashini17/wav2vec2-large-xls-r-300m-ta-colab-new | 1 | null | transformers | 28,418 | Entry not found |
SuperAI2-Machima/wangchan-finetune-ner-pos-v3 | d5054912c33b30e41bb6bee320c61aa082499c20 | 2022-02-24T05:26:02.000Z | [
"pytorch",
"camembert",
"token-classification",
"transformers",
"autotrain_compatible"
] | token-classification | false | SuperAI2-Machima | null | SuperAI2-Machima/wangchan-finetune-ner-pos-v3 | 1 | null | transformers | 28,419 | Entry not found |
SuperDoge/DialoGPT-small-harrypotter | 77ddc615ac86305244b025abdff1a1b1fc2ce35c | 2021-09-01T03:29:38.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | SuperDoge | null | SuperDoge/DialoGPT-small-harrypotter | 1 | null | transformers | 28,420 | Entry not found |
T-Systems-onsite/cross-de-es-roberta-sentence-transformer | e5a811f8803d1cfd92e72bb56b17902905f0dd05 | 2021-04-06T05:37:15.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | T-Systems-onsite | null | T-Systems-onsite/cross-de-es-roberta-sentence-transformer | 1 | null | transformers | 28,421 | Entry not found |
T-Systems-onsite/cross-de-zh-roberta-sentence-transformer | 3a01731358978df1edcb1a6bfff0957627c59ac4 | 2021-04-06T11:09:56.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | T-Systems-onsite | null | T-Systems-onsite/cross-de-zh-roberta-sentence-transformer | 1 | null | transformers | 28,422 | Entry not found |
T-Systems-onsite/cross-en-de-es-roberta-sentence-transformer | b3799e45b0be57f49367130e66f5b719be999396 | 2020-12-30T06:14:24.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | T-Systems-onsite | null | T-Systems-onsite/cross-en-de-es-roberta-sentence-transformer | 1 | null | transformers | 28,423 | Entry not found |
T-Systems-onsite/cross-en-de-nl-roberta-sentence-transformer | 52a781f333060d9b7ed3a08d0f05624905d10f7b | 2020-12-30T07:03:00.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | T-Systems-onsite | null | T-Systems-onsite/cross-en-de-nl-roberta-sentence-transformer | 1 | null | transformers | 28,424 | Entry not found |
T-Systems-onsite/cross-en-it-roberta-sentence-transformer | 50cbe498cf4663cd960839683de3a2609d7ebe27 | 2022-06-28T19:56:04.000Z | [
"pytorch",
"tf",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | T-Systems-onsite | null | T-Systems-onsite/cross-en-it-roberta-sentence-transformer | 1 | null | transformers | 28,425 | Entry not found |
T-Systems-onsite/cross-en-nl-fr-roberta-sentence-transformer | 0dac0efaac0382e73372a168460782c88d45d614 | 2021-01-01T16:26:52.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | T-Systems-onsite | null | T-Systems-onsite/cross-en-nl-fr-roberta-sentence-transformer | 1 | null | transformers | 28,426 | Entry not found |
T-Systems-onsite/cross-en-nl-roberta-sentence-transformer | 61e5f6a636f9abf4262c6aeb4a2930de2b8b1017 | 2021-04-06T16:19:50.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | T-Systems-onsite | null | T-Systems-onsite/cross-en-nl-roberta-sentence-transformer | 1 | null | transformers | 28,427 | Entry not found |
T1Berger/bert-base-cased-goemotions-emotion5 | eb66ae920f8ad82e238f671922244681e8758089 | 2021-11-13T16:01:54.000Z | [
"pytorch",
"bert",
"transformers"
] | null | false | T1Berger | null | T1Berger/bert-base-cased-goemotions-emotion5 | 1 | null | transformers | 28,428 | Entry not found |
Taekyoon/neg_komrc_train | 17628b2437bbbd02f5c96e1199d790bcce909654 | 2022-03-12T16:36:37.000Z | [
"pytorch",
"tensorboard",
"bert",
"question-answering",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] | question-answering | false | Taekyoon | null | Taekyoon/neg_komrc_train | 1 | null | transformers | 28,429 | ---
tags:
- generated_from_trainer
model-index:
- name: neg_komrc_train
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# neg_komrc_train
This model is a fine-tuned version of [beomi/kcbert-base](https://huggingface.co/beomi/kcbert-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4016
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 1234
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 0.277 | 0.51 | 10000 | 0.4016 |
| 0.1671 | 1.03 | 20000 | 0.4116 |
| 0.1725 | 1.54 | 30000 | 0.4390 |
| 0.0868 | 2.06 | 40000 | 0.5147 |
| 0.0868 | 2.57 | 50000 | 0.5064 |
### Framework versions
- Transformers 4.13.0
- Pytorch 1.10.0+cu111
- Datasets 1.18.4
- Tokenizers 0.10.3
|
Taekyoon/test_bert_model | 251b3ce3555ef65916f2ac6b4142dfe4bfcb8682 | 2022-02-16T07:26:16.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | Taekyoon | null | Taekyoon/test_bert_model | 1 | null | transformers | 28,430 | Entry not found |
Taekyoon/test_model | fd614ed1434b55cc49f09de8f53ddead55f43c82 | 2021-12-20T10:27:26.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | Taekyoon | null | Taekyoon/test_model | 1 | null | transformers | 28,431 | Entry not found |
Taekyoon/v0.41_uniclova | 4d45f8366010469ef533bdc240775a0528e1dd15 | 2022-01-16T11:46:24.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | Taekyoon | null | Taekyoon/v0.41_uniclova | 1 | null | transformers | 28,432 | Entry not found |
Taekyoon/v0.4_uniclova | 4e31307f4cceda7422c8ba19db81b427d291bf84 | 2022-01-16T11:49:51.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | Taekyoon | null | Taekyoon/v0.4_uniclova | 1 | null | transformers | 28,433 | Entry not found |
Teepika/Sentence-Transformer-NSP-Fine-Tuned | 25a47a6683a85952572bc860c54d33fea0ac7ebc | 2021-10-25T22:02:24.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | Teepika | null | Teepika/Sentence-Transformer-NSP-Fine-Tuned | 1 | null | transformers | 28,434 | Entry not found |
Teepika/roberta-base-squad2-finetuned-selqa | 3abcf22e3c2d007e982e1b121e23d044cd01fb5e | 2021-12-08T21:49:27.000Z | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | false | Teepika | null | Teepika/roberta-base-squad2-finetuned-selqa | 1 | null | transformers | 28,435 | Entry not found |
Teepika/t5-small-finetuned-xsum-gcloud1 | 3eb9e6bbbf3ac13ba838a6591626f4e8cd3e6152 | 2021-11-02T08:05:04.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Teepika | null | Teepika/t5-small-finetuned-xsum-gcloud1 | 1 | null | transformers | 28,436 | Entry not found |
Teepika/t5-small-finetuned-xsum-proplus | eda00e7ee51613089f743a179580fc26bb127ace | 2021-11-02T02:02:31.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Teepika | null | Teepika/t5-small-finetuned-xsum-proplus | 1 | null | transformers | 28,437 | Entry not found |
Tejasvb/DialoGPT-small-rick | 33dbef825a58d217a1204b37963ab5c0eb12117b | 2021-08-29T05:05:19.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Tejasvb | null | Tejasvb/DialoGPT-small-rick | 1 | null | transformers | 28,438 | ---
tags:
- conversational
---
|
Tejaswini/opus-mt-en-ro-finetuned-en-to-ro | b268563e529ddfea62097d79cae560ea1fd148cd | 2022-02-18T04:35:21.000Z | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Tejaswini | null | Tejaswini/opus-mt-en-ro-finetuned-en-to-ro | 1 | null | transformers | 28,439 | Entry not found |
TheLongSentance/t5-small-finetuned-toxic | ce20c3ad99da45453d5bb5e50e6a19dbbaab6c1c | 2021-08-03T09:25:06.000Z | [
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible"
] | text2text-generation | false | TheLongSentance | null | TheLongSentance/t5-small-finetuned-toxic | 1 | null | transformers | 28,440 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model_index:
- name: t5-small-finetuned-toxic
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
metric:
name: Rouge1
type: rouge
value: 93.7659
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-toxic
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1295
- Rouge1: 93.7659
- Rouge2: 3.6618
- Rougel: 93.7652
- Rougelsum: 93.7757
- Gen Len: 2.5481
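The very short generation length (~2.5 tokens) suggests the model emits a short, label-like output; a minimal inference sketch, with an illustrative input sentence:
```python
from transformers import pipeline

# Load the fine-tuned checkpoint as a text2text pipeline
toxic = pipeline("text2text-generation", model="TheLongSentance/t5-small-finetuned-toxic")
print(toxic("you are a wonderful person", max_length=10))
```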
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
| 0.1595 | 1.0 | 7979 | 0.1295 | 93.7659 | 3.6618 | 93.7652 | 93.7757 | 2.5481 |
### Framework versions
- Transformers 4.9.1
- Pytorch 1.9.0+cu102
- Datasets 1.11.0
- Tokenizers 0.10.3
|
TheLongSentance/t5_large_baseline | bddd59761ed9aa42494782ae3de6f6f0da808c9b | 2021-08-24T11:11:19.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible"
] | text2text-generation | false | TheLongSentance | null | TheLongSentance/t5_large_baseline | 1 | null | transformers | 28,441 | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model_index:
- name: t5_large_baseline
results:
- task:
name: Summarization
type: summarization
metric:
name: Rouge1
type: rouge
value: 99.8958
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5_large_baseline
This model is a fine-tuned version of [t5-large](https://huggingface.co/t5-large) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0010
- Rouge1: 99.8958
- Rouge2: 99.8696
- Rougel: 99.8958
- Rougelsum: 99.8958
- Gen Len: 46.715
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adafactor
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
| 0.9852 | 0.33 | 50 | 0.1098 | 55.1421 | 49.8248 | 54.4294 | 54.7377 | 19.0 |
| 0.1186 | 0.67 | 100 | 0.0176 | 58.0994 | 54.8973 | 57.7383 | 57.9538 | 19.0 |
| 0.0417 | 1.0 | 150 | 0.0057 | 58.3685 | 55.7353 | 58.279 | 58.2729 | 19.0 |
| 0.0225 | 1.33 | 200 | 0.0029 | 58.8981 | 56.2457 | 58.8202 | 58.7906 | 19.0 |
| 0.0131 | 1.67 | 250 | 0.0024 | 58.8439 | 56.2535 | 58.7557 | 58.7218 | 19.0 |
| 0.0112 | 2.0 | 300 | 0.0013 | 58.9538 | 56.4749 | 58.9322 | 58.8817 | 19.0 |
| 0.0077 | 2.33 | 350 | 0.0013 | 58.9538 | 56.4749 | 58.9322 | 58.8817 | 19.0 |
| 0.0043 | 2.67 | 400 | 0.0010 | 59.0124 | 56.5806 | 58.9867 | 58.9342 | 19.0 |
| 0.0052 | 3.0 | 450 | 0.0010 | 59.0402 | 56.6982 | 59.0385 | 58.986 | 19.0 |
### Framework versions
- Transformers 4.10.0.dev0
- Pytorch 1.9.0+cu111
- Datasets 1.11.0
- Tokenizers 0.10.3
|
TheLongSentance/t5_mimic_final_chkpnt150000 | e73eb3d21ef1d8b8b3604b93466f90f97ec2a658 | 2021-09-16T09:44:19.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_final_chkpnt150000 | 1 | null | transformers | 28,442 | Entry not found |
TheLongSentance/t5_mimic_final_chkpnt25000 | d81130e51b3e83d0d53da03cf16c3c1362b19089 | 2021-09-16T13:34:10.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_final_chkpnt25000 | 1 | null | transformers | 28,443 | Entry not found |
TheLongSentance/t5_mimic_final_chkpnt30000 | 42908f1aa864d4cef1fa258ef552f1152b41093c | 2021-09-16T14:01:53.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_final_chkpnt30000 | 1 | null | transformers | 28,444 | Entry not found |
TheLongSentance/t5_mimic_final_chkpnt5000 | 8e080e696d1cfbc6c3dbae6efd345a23247d1452 | 2021-09-15T22:17:10.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_final_chkpnt5000 | 1 | null | transformers | 28,445 | Entry not found |
TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_1_nbn_lr1e4c_chkpnt20000 | d30f5ccae646f8df98a74af772cc2d4ee5c0ac3a | 2021-09-15T21:50:06.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_1_nbn_lr1e4c_chkpnt20000 | 1 | null | transformers | 28,446 | Entry not found |
TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_chkpnt20000 | 50e3fff0a75ce3926d10f06e823593f437bbfa32 | 2021-09-15T19:22:15.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_chkpnt20000 | 1 | null | transformers | 28,447 | Entry not found |
TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_lr3e4c | e04eeb1cfff34a9ab3ba3967db0686a61467cbb1 | 2021-09-10T17:23:05.000Z | [
"pytorch",
"t5",
"feature-extraction",
"transformers"
] | feature-extraction | false | TheLongSentance | null | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_lr3e4c | 1 | null | transformers | 28,448 | Entry not found |
ThePeachOx/DialoGPT-small-harry | 367d40ea5d811fa867c751d5dc5855ee771d3243 | 2022-02-12T00:35:05.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | ThePeachOx | null | ThePeachOx/DialoGPT-small-harry | 1 | null | transformers | 28,449 | ---
tags:
- conversational
---
# Harry DialoGPT Model |
ThePixOne/EconBERTa | 754395f71294bf5d408b63d0b53f17d7ebab2b56 | 2021-11-29T19:13:33.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | ThePixOne | null | ThePixOne/EconBERTa | 1 | 1 | transformers | 28,450 | EconBERTa - RoBERTa further trained for 25k steps (T=512, batch_size = 256) on text sourced from economics books.
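At those settings, continued pretraining covers roughly 25,000 steps × 256 sequences × 512 tokens ≈ 3.3 billion tokens (an upper bound, since padding counts toward T = 512).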
Example usage for MLM:
```python
from transformers import RobertaTokenizer, RobertaForMaskedLM
from transformers import pipeline
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForMaskedLM.from_pretrained('ThePixOne/EconBERTa').cpu()  # original card loaded from a local 'models' directory
model.eval()
mlm = pipeline('fill-mask', model=model, tokenizer=tokenizer)
test = "ECB - euro, FED - <mask>, BoJ - yen"
print(mlm(test)[:2])
[{'sequence': 'ECB - euro, FED - dollar, BoJ - yen',
'score': 0.7342271208763123,
'token': 1404,
'token_str': ' dollar'},
{'sequence': 'ECB - euro, FED - dollars, BoJ - yen',
'score': 0.10828445851802826,
'token': 1932,
'token_str': ' dollars'}]
```
|
Thejas/DialoGPT-small-elon | 60d29665a2fcecbce18735b17d11a0a08eeba1e2 | 2021-11-04T13:47:03.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Thejas | null | Thejas/DialoGPT-small-elon | 1 | null | transformers | 28,451 | ---
tags:
- conversational
---
# Elon Musk DialoGPT Model |
Thitaree/distilbert-base-uncased-finetuned-squad | 35ea70c4b444d1dbd1e5f379966d6a05f7be0a31 | 2021-09-01T15:33:24.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | Thitaree | null | Thitaree/distilbert-base-uncased-finetuned-squad | 1 | null | transformers | 28,452 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
results:
- task:
name: Question Answering
type: question-answering
dataset:
name: squad
type: squad
args: plain_text
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Framework versions
- Transformers 4.10.0
- Pytorch 1.9.0+cu102
- Datasets 1.11.0
- Tokenizers 0.10.3
|
ThoracicCosine/DialoGPT-small-harrypotter | b149f65018cc91ba8c8d214e37fefd5e12959957 | 2021-08-28T14:26:13.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | ThoracicCosine | null | ThoracicCosine/DialoGPT-small-harrypotter | 1 | null | transformers | 28,453 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
Titantoe/XLMR-ENIS-finetuned-ner | 783f5a524984540f37dcc89c4816296d89af29d6 | 2021-10-05T00:54:03.000Z | [
"pytorch",
"tensorboard",
"xlm-roberta",
"token-classification",
"dataset:mim_gold_ner",
"transformers",
"generated_from_trainer",
"license:agpl-3.0",
"model-index",
"autotrain_compatible"
] | token-classification | false | Titantoe | null | Titantoe/XLMR-ENIS-finetuned-ner | 1 | null | transformers | 28,454 | ---
license: agpl-3.0
tags:
- generated_from_trainer
datasets:
- mim_gold_ner
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: XLMR-ENIS-finetuned-ner
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: mim_gold_ner
type: mim_gold_ner
args: mim-gold-ner
metrics:
- name: Precision
type: precision
value: 0.8713799976550592
- name: Recall
type: recall
value: 0.8450255827174531
- name: F1
type: f1
value: 0.8580004617871162
- name: Accuracy
type: accuracy
value: 0.9827265378338392
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# XLMR-ENIS-finetuned-ner
This model is a fine-tuned version of [vesteinn/XLMR-ENIS](https://huggingface.co/vesteinn/XLMR-ENIS) on the mim_gold_ner dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0941
- Precision: 0.8714
- Recall: 0.8450
- F1: 0.8580
- Accuracy: 0.9827
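A minimal inference sketch with the token-classification pipeline (the Icelandic example sentence is illustrative):
```python
from transformers import pipeline

# aggregation_strategy="simple" merges word-piece tokens into whole entities
ner = pipeline("token-classification",
               model="Titantoe/XLMR-ENIS-finetuned-ner",
               aggregation_strategy="simple")
print(ner("Jón Jónsson býr í Reykjavík."))
```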
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0572 | 1.0 | 2904 | 0.0998 | 0.8586 | 0.8171 | 0.8373 | 0.9802 |
| 0.0313 | 2.0 | 5808 | 0.0868 | 0.8666 | 0.8288 | 0.8473 | 0.9822 |
| 0.0199 | 3.0 | 8712 | 0.0941 | 0.8714 | 0.8450 | 0.8580 | 0.9827 |
### Framework versions
- Transformers 4.11.2
- Pytorch 1.9.0+cu102
- Datasets 1.12.1
- Tokenizers 0.10.3
|
Tito/T5small_model1_fp16_false-finetuned-en-to-de | b0a7fa0ba64fb5b7509bc904a362d22ec2ac549e | 2021-12-06T23:04:40.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Tito | null | Tito/T5small_model1_fp16_false-finetuned-en-to-de | 1 | null | transformers | 28,455 | Entry not found |
Tito/T5small_model3_decay_001-finetuned-en-to-de | 1bab47c16f417dd67f2da9c93afa502c5b6cc291 | 2021-12-07T00:28:34.000Z | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | Tito | null | Tito/T5small_model3_decay_001-finetuned-en-to-de | 1 | null | transformers | 28,456 | Entry not found |
Tofu05/DialoGPT-med-boon3 | 82d146b6e678be8931f524b8d806d021035e7bdb | 2022-01-30T12:53:49.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Tofu05 | null | Tofu05/DialoGPT-med-boon3 | 1 | null | transformers | 28,457 | ---
tags:
- conversational
---
# Boon Bot DialoGPT Model |
Tomasz/roberta | e45ab7593950c9e9ff152d597f0eeef117fd524f | 2021-06-15T13:13:56.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Tomasz | null | Tomasz/roberta | 1 | null | transformers | 28,458 | Entry not found |
Transabrar/roberta-base-finetuned-abs | 143f06741db621053db9b780f9ab6092854bf49d | 2021-10-12T09:14:46.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Transabrar | null | Transabrar/roberta-base-finetuned-abs | 1 | null | transformers | 28,459 | Entry not found |
Transabrar/roberta-large-finetuned-abr | c3342a0f0c164e2fdd4953175f5c2301b4ddd78f | 2021-10-10T15:33:04.000Z | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Transabrar | null | Transabrar/roberta-large-finetuned-abr | 1 | null | transformers | 28,460 | Entry not found |
Trixzy/rickai-v1 | 1ff1aa6838dc57169ba66b7523be19eed7d48ee7 | 2021-10-31T20:17:36.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Trixzy | null | Trixzy/rickai-v1 | 1 | 1 | transformers | 28,461 | ---
tags:
- conversational
---
Rick chatbot made with GPT-2, based on the character from the show Rick and Morty. Discord bot available now!
https://discord.com/oauth2/authorize?client_id=894569097818431519&permissions=1074113536&scope=bot
(v1 is no longer supported with RickBot) |
Tropics/DialoGPT-small-peppa | a3f1c11f322cdaa28aaa8b9f472b2aa53e487df9 | 2021-09-01T11:12:21.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Tropics | null | Tropics/DialoGPT-small-peppa | 1 | null | transformers | 28,462 | ---
tags:
- conversational
---
# Peppa Pig DialoGPT Model |
TuhinColumbia/QAGenControlCode | e6eea9228b01c46a3ede51b7b2c5c177657b6035 | 2021-10-10T06:44:58.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | TuhinColumbia | null | TuhinColumbia/QAGenControlCode | 1 | null | transformers | 28,463 | Entry not found |
TuhinColumbia/italianpoetrymany | 04b408bb6d4eeffa8676e1c42f71559c7fc4f9be | 2021-09-04T09:02:49.000Z | [
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | TuhinColumbia | null | TuhinColumbia/italianpoetrymany | 1 | null | transformers | 28,464 | Entry not found |
TurkuNLP/wikibert-base-af-cased | 81b848d37d1b90bbe8b6905366ada590433817b2 | 2020-05-24T19:58:31.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-af-cased | 1 | null | transformers | 28,465 | Entry not found |
TurkuNLP/wikibert-base-be-cased | 50939000cfd319f2293686f54b839a3bce396824 | 2020-05-24T19:58:44.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-be-cased | 1 | null | transformers | 28,466 | Entry not found |
TurkuNLP/wikibert-base-cs-cased | 96e72daf98259a29e4ed4a7ae9abce17ad7fb755 | 2020-05-24T19:59:01.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-cs-cased | 1 | null | transformers | 28,467 | Entry not found |
TurkuNLP/wikibert-base-el-cased | 9cda55cdc558481ba7c78d99dfbe629447aa5243 | 2020-05-24T19:59:19.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-el-cased | 1 | null | transformers | 28,468 | Entry not found |
TurkuNLP/wikibert-base-eu-cased | 96bcf3208203c68211963949aa1d30dbbd5f529e | 2020-05-24T19:59:42.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-eu-cased | 1 | null | transformers | 28,469 | Entry not found |
TurkuNLP/wikibert-base-hi-cased | 6fc2b3db1a954c936f198f56c3ea344495ed3d59 | 2020-05-24T20:00:18.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-hi-cased | 1 | null | transformers | 28,470 | Entry not found |
TurkuNLP/wikibert-base-hr-cased | 268840f7c1629d3e069d5dcdbeaca7d230651546 | 2020-05-24T20:00:23.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-hr-cased | 1 | null | transformers | 28,471 | Entry not found |
TurkuNLP/wikibert-base-it-cased | 8ca641e94ebdfca59c4e6fd444d9f58e71a46cc3 | 2020-05-24T20:00:47.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-it-cased | 1 | null | transformers | 28,472 | Entry not found |
TurkuNLP/wikibert-base-lt-cased | 9f823751591892c26d162ab6b50fdcb501552f2a | 2020-05-24T20:00:57.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-lt-cased | 1 | null | transformers | 28,473 | Entry not found |
TurkuNLP/wikibert-base-pl-cased | ff8319bdf1dfcf07c2ea832816509e5bb5ceaca7 | 2020-05-24T20:01:17.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-pl-cased | 1 | null | transformers | 28,474 | Entry not found |
TurkuNLP/wikibert-base-sk-cased | 5293ab244a7f15aa599a91489be71cfa77899cc8 | 2020-05-24T20:01:37.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-sk-cased | 1 | null | transformers | 28,475 | Entry not found |
TurkuNLP/wikibert-base-ur-cased | 0c8466656c01950972001f3f01a0adc6ca9ab016 | 2020-05-24T20:02:19.000Z | [
"pytorch",
"transformers"
] | null | false | TurkuNLP | null | TurkuNLP/wikibert-base-ur-cased | 1 | null | transformers | 28,476 | Entry not found |
UKJ5/DialoGPT-small-harrypotter | fd750dcda4674369702c0949928643ec028d09c2 | 2021-12-30T16:07:33.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | UKJ5 | null | UKJ5/DialoGPT-small-harrypotter | 1 | null | transformers | 28,477 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
Ulto/avengeeers | 04e80591dbe477860194ffccbcbf76a4f5bf53d8 | 2021-11-21T00:35:55.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | false | Ulto | null | Ulto/avengeeers | 1 | null | transformers | 28,478 | Entry not found |
Ulto/avengers2 | fce397619b860cdb4fd2cba6f5266c4be3bdb7d5 | 2021-11-21T01:13:26.000Z | [
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] | text-generation | false | Ulto | null | Ulto/avengers2 | 1 | null | transformers | 28,479 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- null
model-index:
- name: avengers2
results:
- task:
name: Causal Language Modeling
type: text-generation
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# avengers2
This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 4.0131
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 56 | 3.9588 |
| No log | 2.0 | 112 | 3.9996 |
| No log | 3.0 | 168 | 4.0131 |
### Framework versions
- Transformers 4.10.0
- Pytorch 1.9.0
- Datasets 1.2.1
- Tokenizers 0.10.1
|
Unbabel/xlm-roberta-wmt-metrics-da | d38479897b881f6b896d41170a932d9f7909c010 | 2021-07-25T15:34:49.000Z | [
"pytorch",
"xlm-roberta",
"feature-extraction",
"transformers"
] | feature-extraction | false | Unbabel | null | Unbabel/xlm-roberta-wmt-metrics-da | 1 | null | transformers | 28,480 | Entry not found |
Username1/Mourinhio-medium | 74affbd170a022e0f5de2dab438b9dbda08e1a90 | 2021-09-10T20:52:27.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Username1 | null | Username1/Mourinhio-medium | 1 | null | transformers | 28,481 | ---
tags:
- conversational
---
# Mourinhio |
Username1/Mourinho | 60887756b9b5203e25ca4b60515201f41f28f495 | 2021-09-12T20:58:45.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Username1 | null | Username1/Mourinho | 1 | null | transformers | 28,482 | ---
tags:
- conversational
---
# Mourinhio |
V3RX2000/distilbert-base-uncased-finetuned-squad | 599d7a49c9a1ab1fca2357752c82a8f171d1dc75 | 2021-10-12T04:47:10.000Z | [
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | question-answering | false | V3RX2000 | null | V3RX2000/distilbert-base-uncased-finetuned-squad | 1 | null | transformers | 28,483 | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1580
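A minimal usage sketch with the question-answering pipeline (the question and context are illustrative):
```python
from transformers import pipeline

qa = pipeline("question-answering", model="V3RX2000/distilbert-base-uncased-finetuned-squad")
print(qa(question="What was the model fine-tuned on?",
         context="This model is a fine-tuned version of distilbert-base-uncased on the SQuAD dataset."))
```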
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.2246 | 1.0 | 5533 | 1.1484 |
| 0.9433 | 2.0 | 11066 | 1.1294 |
| 0.7625 | 3.0 | 16599 | 1.1580 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.9.0+cu111
- Datasets 1.12.1
- Tokenizers 0.10.3
|
VMET/DialoGPT-small-dumbassbot | 494c4df38d2f2e9ae0282c9018f20004ff35a1cf | 2021-12-22T17:24:15.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | VMET | null | VMET/DialoGPT-small-dumbassbot | 1 | null | transformers | 28,484 | ---
tags:
- conversational
---
# Dumb bot |
VaguelyCynical/DialoGPT-small-RickSanchez | e82d480cae1fbe6664c777fb975039bdbeab7237 | 2021-09-21T06:25:40.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | VaguelyCynical | null | VaguelyCynical/DialoGPT-small-RickSanchez | 1 | null | transformers | 28,485 | ---
tags:
- conversational
---
# Rick Sanchez DialoGPT Model |
VincentButterfield/DialoGPT-small-harrypotter | cceec1033d1437f673d4a7fccd84b27e448325f5 | 2021-10-07T02:32:43.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | VincentButterfield | null | VincentButterfield/DialoGPT-small-harrypotter | 1 | null | transformers | 28,486 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
VlakoResker/wav2vec2-large-xls-r-300m-ru-en | 84c562d514d70e2eb02f6066d47d6c0d11c0f4c2 | 2021-12-04T02:26:33.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | VlakoResker | null | VlakoResker/wav2vec2-large-xls-r-300m-ru-en | 1 | null | transformers | 28,487 | Entry not found |
Vlasta/CDNA_bert_6 | fc1612e52a7fdb3a292f7af2e178984375bacb10 | 2022-01-22T12:52:35.000Z | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | false | Vlasta | null | Vlasta/CDNA_bert_6 | 1 | null | transformers | 28,488 | Entry not found |
VoVanPhuc/Wav2Vec_Vietnamese | b1efaddf37773896f124d67a890c80bc3c0c3f49 | 2021-08-03T01:15:09.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | VoVanPhuc | null | VoVanPhuc/Wav2Vec_Vietnamese | 1 | 1 | transformers | 28,489 | Entry not found |
VoVanPhuc/wav2vec2-norwegian-and-english | 3dfdd5211b73084e191f9ce8bd3e2ae9dcb8e8c4 | 2021-08-09T16:57:19.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | VoVanPhuc | null | VoVanPhuc/wav2vec2-norwegian-and-english | 1 | null | transformers | 28,490 | Entry not found |
WSS/wav2vec2-large-xlsr-53-vietnamese | b9249703eed5da9ad1b7b9a2cd5ef0f65ac1108a | 2021-11-12T09:21:38.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | WSS | null | WSS/wav2vec2-large-xlsr-53-vietnamese | 1 | null | transformers | 28,491 | "Hello"
|
Weipeng/dummy-model | 80f915582e17ac2bb14544b4a0e713ed93840892 | 2021-07-02T11:16:59.000Z | [
"pytorch",
"camembert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | false | Weipeng | null | Weipeng/dummy-model | 1 | null | transformers | 28,492 | Entry not found |
Weiqin/roberta-large-finetuned-race-roberta | 82123785dbdc589f3e01222e73363b722db0c898 | 2021-11-15T04:29:33.000Z | [
"pytorch",
"tensorboard",
"roberta",
"multiple-choice",
"transformers"
] | multiple-choice | false | Weiqin | null | Weiqin/roberta-large-finetuned-race-roberta | 1 | null | transformers | 28,493 | Entry not found |
Wessel/DiabloGPT-medium-harrypotter | 4598e126bab2d2ea9477bc7ef0f6eb818203c0f2 | 2021-09-11T20:37:06.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Wessel | null | Wessel/DiabloGPT-medium-harrypotter | 1 | null | transformers | 28,494 | ---
tags:
- conversational
---
# Harry Potter DialoGPT Model |
White/white-bot | ea9158023ca3a4c7ea2509541e8741acb6accacd | 2021-09-06T14:13:46.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | White | null | White/white-bot | 1 | null | transformers | 28,495 | ---
tags:
- conversational
---
# White's Bot |
Whitez/DialoGPT-small-twety | 08b0ddeef6cc1018f86c7a5a8b41a11f0a11da21 | 2021-10-09T23:06:49.000Z | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | false | Whitez | null | Whitez/DialoGPT-small-twety | 1 | null | transformers | 28,496 | ---
tags:
- conversational
---
# Twety DialoGPT Model |
Wikidepia/w2v2-id-tmp | a2896d63465a49bddc8c50d4b64d47ad4e462b6c | 2022-02-18T07:55:42.000Z | [
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | automatic-speech-recognition | false | Wikidepia | null | Wikidepia/w2v2-id-tmp | 1 | null | transformers | 28,497 | Entry not found |
WikinewsSum/bart-large-multi-combine-wiki-news | cf54e1db3626a16bde1aa3e24911a47587c3568f | 2020-07-01T08:25:39.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | WikinewsSum | null | WikinewsSum/bart-large-multi-combine-wiki-news | 1 | null | transformers | 28,498 | Entry not found |
WikinewsSum/bart-large-multi-fr-wiki-news | 3e1c4aaa92b5f82bd2bcda0884124e640e5047d5 | 2020-07-01T08:35:41.000Z | [
"pytorch",
"bart",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | false | WikinewsSum | null | WikinewsSum/bart-large-multi-fr-wiki-news | 1 | null | transformers | 28,499 | Entry not found |