| Column | Type | Min | Max |
|---|---|---|---|
| modelId | string (length) | 5 | 139 |
| author | string (length) | 2 | 42 |
| last_modified | timestamp[us, tz=UTC] | 2020-02-15 11:33:14 | 2025-07-14 06:27:53 |
| downloads | int64 | 0 | 223M |
| likes | int64 | 0 | 11.7k |
| library_name | string (519 classes) | | |
| tags | list (length) | 1 | 4.05k |
| pipeline_tag | string (55 classes) | | |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 | 2025-07-14 06:27:45 |
| card | string (length) | 11 | 1.01M |
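For readers who want to work with rows shaped like the records below, here is a minimal sketch of loading and filtering such a dataset with the `datasets` library. The repository id is a placeholder, since the dataset's Hub id is not given in this listing.

```python
# Minimal sketch of inspecting a dataset with the schema above.
# "<dataset-id>" is a hypothetical placeholder; substitute the real dataset id.
from datasets import load_dataset

ds = load_dataset("<dataset-id>", split="train")

# Keep only automatic-speech-recognition models with at least one download.
asr = ds.filter(
    lambda row: row["pipeline_tag"] == "automatic-speech-recognition"
    and row["downloads"] > 0
)
print(asr[0]["modelId"], asr[0]["likes"])
```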
jonatasgrosman/exp_w2v2t_uk_unispeech_s607
jonatasgrosman
2022-07-10T13:30:53Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:30:30Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_unispeech_s607 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
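The ASR cards in this listing all follow the same usage pattern. As a rough sketch (not part of the card above), this is one way such a checkpoint could be run with the transformers pipeline, resampling the input to the required 16 kHz; whether this specific checkpoint loads cleanly in the pipeline, and the audio path, are assumptions.

```python
# Sketch: transcribing a local WAV file with one of the HuggingSound
# fine-tuned checkpoints via the transformers ASR pipeline.
import librosa
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="jonatasgrosman/exp_w2v2t_uk_unispeech_s607",
)

# The card requires 16 kHz input, so resample on load.
speech, _ = librosa.load("example.wav", sr=16_000)  # placeholder path
print(asr(speech)["text"])
```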
jonatasgrosman/exp_w2v2t_uk_unispeech_s244
jonatasgrosman
2022-07-10T13:27:38Z
4
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:27:14Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_unispeech_s244 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_xlsr-53_s965
jonatasgrosman
2022-07-10T13:18:51Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:18:27Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_xlsr-53_s965 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-100k_s149
jonatasgrosman
2022-07-10T13:10:05Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:09:40Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-100k_s149 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_wav2vec2_s317
jonatasgrosman
2022-07-10T12:56:58Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:56:24Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_wav2vec2_s317 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_wav2vec2_s646
jonatasgrosman
2022-07-10T12:53:07Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:52:41Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_wav2vec2_s646 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_wav2vec2_s578
jonatasgrosman
2022-07-10T12:49:32Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:49:06Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_wav2vec2_s578 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-it_s962
jonatasgrosman
2022-07-10T12:46:24Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:45:54Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-it_s962 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-it_s946
jonatasgrosman
2022-07-10T12:42:12Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:41:21Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-it_s946 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
Manishkalra/discourse_classification_using_robrta_base
Manishkalra
2022-07-10T12:41:59Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-10T10:52:40Z
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: discourse_classification_using_robrta_base results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # discourse_classification_using_robrta_base This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.0832 - Accuracy: 0.6592 - F1: 0.6592 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
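As a hedged sketch (not part of the card above), the classifier could be queried through the text-classification pipeline; the example sentence is invented, and the returned label names depend on the model's config.

```python
# Sketch: scoring one sentence with the fine-tuned discourse classifier.
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="Manishkalra/discourse_classification_using_robrta_base",
)
print(clf("In conclusion, the evidence supports the proposed policy."))
```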
croumegous/q-FrozenLake-v1-4x4-noSlippery
croumegous
2022-07-10T12:39:35Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-10T12:34:58Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="croumegous/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
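The snippet in the card above calls `load_from_hub` without defining it. A plausible implementation, assuming the repository stores a pickled Q-table dictionary as in the Hugging Face Deep RL course, might look like the following; this is an assumption, not the author's actual helper.

```python
# Sketch: one way the load_from_hub helper referenced above could work.
import pickle
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled Q-learning model dict from the Hub and load it."""
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)
```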
jonatasgrosman/exp_w2v2t_de_vp-it_s184
jonatasgrosman
2022-07-10T12:31:48Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:31:03Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-it_s184 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_r-wav2vec2_s466
jonatasgrosman
2022-07-10T12:28:19Z
10
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:27:31Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_r-wav2vec2_s466 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_xls-r_s438
jonatasgrosman
2022-07-10T12:16:53Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:16:20Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_xls-r_s438 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_xls-r_s143
jonatasgrosman
2022-07-10T12:13:39Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:12:57Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_xls-r_s143 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_xls-r_s335
jonatasgrosman
2022-07-10T12:10:10Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:09:26Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_xls-r_s335 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_unispeech-sat_s480
jonatasgrosman
2022-07-10T12:06:37Z
5
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:05:47Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_unispeech-sat_s480 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_unispeech-sat_s968
jonatasgrosman
2022-07-10T11:58:51Z
5
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:58:11Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_unispeech-sat_s968 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-nl_s283
jonatasgrosman
2022-07-10T11:55:24Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:54:33Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-nl_s283 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-nl_s8
jonatasgrosman
2022-07-10T11:48:28Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:47:59Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-nl_s8 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-es_s929
jonatasgrosman
2022-07-10T11:45:20Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:44:52Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-es_s929 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-es_s872
jonatasgrosman
2022-07-10T11:42:06Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:41:34Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-es_s872 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-es_s189
jonatasgrosman
2022-07-10T11:37:25Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:36:37Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-es_s189 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-fr_s561
jonatasgrosman
2022-07-10T11:27:30Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:26:45Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-fr_s561 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_wavlm_s824
jonatasgrosman
2022-07-10T11:10:49Z
3
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:10:06Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_wavlm_s824 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_no-pretraining_s800
jonatasgrosman
2022-07-10T11:03:48Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:03:13Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_no-pretraining_s800 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_no-pretraining_s286
jonatasgrosman
2022-07-10T10:59:53Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:59:13Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_no-pretraining_s286 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-sv_s470
jonatasgrosman
2022-07-10T10:46:22Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:45:41Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-sv_s470 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_hubert_s55
jonatasgrosman
2022-07-10T10:39:16Z
3
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:38:28Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_hubert_s55 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_hubert_s921
jonatasgrosman
2022-07-10T10:35:39Z
3
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:34:53Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_hubert_s921 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_hubert_s126
jonatasgrosman
2022-07-10T10:31:00Z
3
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:30:14Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_hubert_s126 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_unispeech_s592
jonatasgrosman
2022-07-10T10:22:49Z
4
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:22:00Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_unispeech_s592 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_xlsr-53_s688
jonatasgrosman
2022-07-10T10:15:31Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:14:47Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_xlsr-53_s688 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_xlsr-53_s509
jonatasgrosman
2022-07-10T10:12:05Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:11:32Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_xlsr-53_s509 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-100k_s627
jonatasgrosman
2022-07-10T10:05:17Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:04:41Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-100k_s627 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-100k_s159
jonatasgrosman
2022-07-10T10:00:53Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:00:18Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-100k_s159 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
huggingtweets/bardissimo
huggingtweets
2022-07-10T09:55:39Z
4
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-07-10T09:42:46Z
--- language: en thumbnail: http://www.huggingtweets.com/bardissimo/1657446903598/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1864403542/-1_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Alexander Bard</div> <div style="text-align: center; font-size: 14px;">@bardissimo</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Alexander Bard. | Data | Alexander Bard | | --- | --- | | Tweets downloaded | 3221 | | Retweets | 626 | | Short tweets | 23 | | Tweets kept | 2572 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2yokf106/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @bardissimo's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/yymc0teo) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/yymc0teo/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/bardissimo') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
jonatasgrosman/exp_w2v2t_de_wav2vec2_s982
jonatasgrosman
2022-07-10T09:47:46Z
10
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:46:58Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_wav2vec2_s982 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
geninhu/fastai-style-transfer-japanese-art
geninhu
2022-07-10T09:35:47Z
0
0
fastai
[ "fastai", "region:us" ]
null
2022-07-10T09:35:21Z
--- tags: - fastai --- # Amazing! 🥳 Congratulations on hosting your fastai model on the Hugging Face Hub! # Some next steps 1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))! 2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)). 3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)! Greetings fellow fastlearner 🤝! Don't forget to delete this content from your model card. --- # Model card ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed
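As a sketch (not part of the card above), a fastai learner pushed with the Hub's fastai integration can usually be reloaded with `from_pretrained_fastai`; whether this particular repository was exported that way, and the prediction call, are assumptions.

```python
# Sketch: loading the fastai learner for this repo from the Hub.
from huggingface_hub import from_pretrained_fastai

learner = from_pretrained_fastai("geninhu/fastai-style-transfer-japanese-art")
# prediction = learner.predict(some_input_image)  # hypothetical usage
```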
jonatasgrosman/exp_w2v2t_id_vp-it_s609
jonatasgrosman
2022-07-10T09:29:17Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:28:44Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-it_s609 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_r-wav2vec2_s387
jonatasgrosman
2022-07-10T09:22:00Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:21:36Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_r-wav2vec2_s387 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_r-wav2vec2_s79
jonatasgrosman
2022-07-10T09:18:59Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:18:33Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_r-wav2vec2_s79 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
croumegous/ppo-LunarLander-v2
croumegous
2022-07-10T09:10:56Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-09T17:34:50Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 300.58 +/- 19.30 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
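The card above leaves its usage block as a TODO. One plausible completion, assuming a standard SB3 zip checkpoint and the pre-0.26 gym step API, is sketched below; the checkpoint filename is a guess.

```python
# Sketch: loading and rolling out the PPO agent from the Hub.
import gym
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

checkpoint = load_from_hub(
    repo_id="croumegous/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",  # assumed filename
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
obs = env.reset()
for _ in range(1000):
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```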
jonatasgrosman/exp_w2v2t_id_xls-r_s324
jonatasgrosman
2022-07-10T09:09:58Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:09:13Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_xls-r_s324 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech-sat_s477
jonatasgrosman
2022-07-10T09:06:39Z
3
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:06:15Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech-sat_s477 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech-sat_s287
jonatasgrosman
2022-07-10T09:03:43Z
3
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:03:19Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech-sat_s287 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-nl_s878
jonatasgrosman
2022-07-10T08:57:47Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:57:02Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-nl_s878 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-nl_s33
jonatasgrosman
2022-07-10T08:54:32Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:54:02Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-nl_s33 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-nl_s496
jonatasgrosman
2022-07-10T08:51:27Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:51:01Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-nl_s496 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
casasdorjunior/t5-small-finetuned-xlsum
casasdorjunior
2022-07-10T08:50:55Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "dataset:xlsum", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2022-07-10T06:26:38Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - xlsum metrics: - rouge model-index: - name: t5-small-finetuned-xlsum results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: xlsum type: xlsum args: spanish metrics: - name: Rouge1 type: rouge value: 15.4289 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-xlsum This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the xlsum dataset. It achieves the following results on the evaluation set: - Loss: 2.6974 - Rouge1: 15.4289 - Rouge2: 3.146 - Rougel: 12.7682 - Rougelsum: 12.912 - Gen Len: 18.9889 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 2.9764 | 1.0 | 2382 | 2.6974 | 15.4289 | 3.146 | 12.7682 | 12.912 | 18.9889 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
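As a sketch (not part of the card above), the checkpoint can be exercised through the summarization pipeline; the Spanish input text is invented for illustration.

```python
# Sketch: summarizing a short Spanish passage with the fine-tuned T5 model.
from transformers import pipeline

summarizer = pipeline(
    "summarization",
    model="casasdorjunior/t5-small-finetuned-xlsum",
)
texto = (
    "El ayuntamiento anunció este lunes un nuevo plan de transporte público "
    "que ampliará las rutas de autobús y reducirá las tarifas durante un año."
)
print(summarizer(texto, max_length=48, min_length=10)[0]["summary_text"])
```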
jonatasgrosman/exp_w2v2t_id_vp-es_s425
jonatasgrosman
2022-07-10T08:45:39Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:45:15Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-es_s425 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-es_s920
jonatasgrosman
2022-07-10T08:42:35Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:42:10Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-es_s920 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-fr_s27
jonatasgrosman
2022-07-10T08:39:22Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:38:58Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-fr_s27 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-fr_s222
jonatasgrosman
2022-07-10T08:33:09Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:32:44Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-fr_s222 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech-ml_s418
jonatasgrosman
2022-07-10T08:30:13Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:29:37Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech-ml_s418 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech-ml_s325
jonatasgrosman
2022-07-10T08:18:06Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:17:22Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech-ml_s325 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech-ml_s621
jonatasgrosman
2022-07-10T08:09:21Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:08:56Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech-ml_s621 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_wavlm_s821
jonatasgrosman
2022-07-10T08:00:42Z
3
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:00:08Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_wavlm_s821 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_wavlm_s557
jonatasgrosman
2022-07-10T07:38:34Z
3
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T07:37:47Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_wavlm_s557 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_no-pretraining_s861
jonatasgrosman
2022-07-10T07:08:09Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T07:07:46Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_no-pretraining_s861 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-sv_s363
jonatasgrosman
2022-07-10T06:47:57Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T06:47:33Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-sv_s363 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-sv_s331
jonatasgrosman
2022-07-10T06:38:13Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T06:37:44Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-sv_s331 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_hubert_s246
jonatasgrosman
2022-07-10T06:24:49Z
3
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T06:24:22Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_hubert_s246 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
SusBioRes-UBC/testpyramidsrnd
SusBioRes-UBC
2022-07-10T06:22:15Z
5
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "unity-ml-agents", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
2022-07-10T06:22:10Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids library_name: ml-agents --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **play directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Write your model_id: SusBioRes-UBC/testpyramidsrnd 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
jonatasgrosman/exp_w2v2t_id_unispeech_s791
jonatasgrosman
2022-07-10T05:17:03Z
4
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T05:16:40Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech_s791 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
freedomking/ernie-ctm-nptag
freedomking
2022-07-10T05:13:13Z
8
0
transformers
[ "transformers", "pytorch", "bert", "endpoints_compatible", "region:us" ]
null
2022-07-10T05:03:13Z
## Introduction ### Ernie-CTM-NPTag Ernie-CTM-NPTag is built by training ERNIE-CTM with a prompt-based objective and decoding with heuristic search, which guarantees that every predicted label falls within the label taxonomy. The fine-tuning setup provides a Chinese noun-phrase tagging task aimed at fine-grained classification of Chinese noun phrases. More detail: https://github.com/PaddlePaddle/PaddleNLP/tree/develop/examples/text_to_knowledge/nptag
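The linked PaddleNLP example exposes NPTag through the Taskflow API; the sketch below assumes that interface (`Taskflow("knowledge_mining", model="nptag")`) is available in the installed PaddleNLP version, and the input phrase and printed output are illustrative only.

```python
# Hedged sketch of NPTag usage via PaddleNLP Taskflow (see the linked example above).
# Assumes paddlenlp and paddlepaddle are installed; API details may vary by version.
from paddlenlp import Taskflow

nptag = Taskflow("knowledge_mining", model="nptag")
print(nptag("糖醋排骨"))  # expected: a fine-grained noun-phrase label for the input
```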
jonatasgrosman/exp_w2v2t_id_xlsr-53_s358
jonatasgrosman
2022-07-10T05:00:23Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T04:59:57Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_xlsr-53_s358 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_xlsr-53_s149
jonatasgrosman
2022-07-10T04:50:13Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T04:49:38Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_xlsr-53_s149 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-100k_s764
jonatasgrosman
2022-07-10T04:30:40Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T04:30:14Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-100k_s764 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-100k_s615
jonatasgrosman
2022-07-10T04:20:20Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T04:19:56Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-100k_s615 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
huggingtweets/06melihgokcek
huggingtweets
2022-07-10T03:44:22Z
3
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-07-10T03:42:53Z
--- language: en thumbnail: http://www.huggingtweets.com/06melihgokcek/1657424657914/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1419298461/Baskan_0383_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">İbrahim Melih Gökçek</div> <div style="text-align: center; font-size: 14px;">@06melihgokcek</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from İbrahim Melih Gökçek. | Data | İbrahim Melih Gökçek | | --- | --- | | Tweets downloaded | 3237 | | Retweets | 457 | | Short tweets | 307 | | Tweets kept | 2473 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/b48osocr/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @06melihgokcek's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3d3h0tqk) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3d3h0tqk/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/06melihgokcek') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
jonatasgrosman/exp_w2v2t_zh-cn_vp-it_s132
jonatasgrosman
2022-07-10T03:00:31Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:59:53Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-it_s132 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-it_s42
jonatasgrosman
2022-07-10T02:57:00Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:56:33Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-it_s42 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-it_s607
jonatasgrosman
2022-07-10T02:53:51Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:53:25Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-it_s607 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_r-wav2vec2_s387
jonatasgrosman
2022-07-10T02:47:37Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:46:55Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_r-wav2vec2_s387 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_xls-r_s904
jonatasgrosman
2022-07-10T02:40:54Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:40:08Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_xls-r_s904 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_xls-r_s847
jonatasgrosman
2022-07-10T02:37:42Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:37:18Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_xls-r_s847 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_xls-r_s108
jonatasgrosman
2022-07-10T02:33:57Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:33:16Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_xls-r_s108 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_unispeech-sat_s840
jonatasgrosman
2022-07-10T02:26:50Z
4
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:26:24Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_unispeech-sat_s840 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_unispeech-sat_s762
jonatasgrosman
2022-07-10T02:23:54Z
3
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:23:29Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_unispeech-sat_s762 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-nl_s423
jonatasgrosman
2022-07-10T02:20:52Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:20:10Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-nl_s423 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-nl_s160
jonatasgrosman
2022-07-10T02:17:34Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:16:47Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-nl_s160 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-nl_s418
jonatasgrosman
2022-07-10T02:14:19Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:13:54Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-nl_s418 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-es_s869
jonatasgrosman
2022-07-10T02:07:18Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:06:54Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-es_s869 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-es_s408
jonatasgrosman
2022-07-10T02:04:12Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:03:47Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-es_s408 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-fr_s732
jonatasgrosman
2022-07-10T02:00:13Z
5
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T01:59:48Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-fr_s732 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-fr_s451
jonatasgrosman
2022-07-10T01:57:19Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T01:56:48Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-fr_s451 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_wavlm_s596
jonatasgrosman
2022-07-10T01:33:18Z
8
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T01:32:32Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_wavlm_s596 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_no-pretraining_s805
jonatasgrosman
2022-07-10T01:26:59Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T01:26:36Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_no-pretraining_s805 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-sv_s331
jonatasgrosman
2022-07-10T01:13:48Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T01:13:25Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-sv_s331 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_hubert_s449
jonatasgrosman
2022-07-10T01:06:08Z
4
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T01:05:36Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_hubert_s449 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_hubert_s358
jonatasgrosman
2022-07-10T01:03:01Z
4
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T01:02:31Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_hubert_s358 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_unispeech_s929
jonatasgrosman
2022-07-10T00:53:51Z
5
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T00:53:27Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_unispeech_s929 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_xlsr-53_s817
jonatasgrosman
2022-07-10T00:50:51Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T00:50:26Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_xlsr-53_s817 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_xlsr-53_s533
jonatasgrosman
2022-07-10T00:47:49Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T00:47:24Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_xlsr-53_s533 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-100k_s624
jonatasgrosman
2022-07-10T00:29:40Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T00:29:03Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-100k_s624 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_wav2vec2_s842
jonatasgrosman
2022-07-10T00:21:17Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T00:20:53Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_wav2vec2_s842 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_wav2vec2_s615
jonatasgrosman
2022-07-10T00:12:27Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T00:12:00Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_wav2vec2_s615 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_fa_vp-it_s985
jonatasgrosman
2022-07-10T00:08:53Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "fa", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T00:08:27Z
--- language: - fa license: apache-2.0 tags: - automatic-speech-recognition - fa datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_fa_vp-it_s985 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (fa)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_fa_r-wav2vec2_s401
jonatasgrosman
2022-07-09T23:56:58Z
23
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "fa", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-09T23:56:34Z
--- language: - fa license: apache-2.0 tags: - automatic-speech-recognition - fa datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_fa_r-wav2vec2_s401 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (fa)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
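Since these checkpoints are exported in the standard transformers format (wav2vec2 with a CTC head), they should also load through the stock ASR pipeline, as sketched below. The model id is taken from the card above; the audio file name is a placeholder, and an audio backend such as ffmpeg or torchaudio is assumed for decoding.

```python
# Alternative sketch: the standard transformers ASR pipeline instead of HuggingSound.
# Assumes `pip install transformers` plus an audio backend; input should be sampled at 16kHz.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="jonatasgrosman/exp_w2v2t_fa_r-wav2vec2_s401")
print(asr("audio.wav")["text"])  # "audio.wav" is an illustrative placeholder
```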