| Column | Dtype | Min | Max |
|---|---|---|---|
| modelId | stringlengths | 5 | 139 |
| author | stringlengths | 2 | 42 |
| last_modified | timestamp[us, tz=UTC]date | 2020-02-15 11:33:14 | 2025-06-23 18:27:52 |
| downloads | int64 | 0 | 223M |
| likes | int64 | 0 | 11.7k |
| library_name | stringclasses | 492 values | |
| tags | sequencelengths | 1 | 4.05k |
| pipeline_tag | stringclasses | 54 values | |
| createdAt | timestamp[us, tz=UTC]date | 2022-03-02 23:29:04 | 2025-06-23 18:25:26 |
| card | stringlengths | 11 | 1.01M |
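Once materialized, a snapshot with this schema can be filtered like any other table. A minimal sketch, assuming the rows below have been exported to a hypothetical `models.parquet` file with these exact columns:

```python
import pandas as pd

# Hypothetical local export of the dump below; the filename is an assumption.
df = pd.read_parquet("models.parquet")

# Most-downloaded speech-recognition models in the snapshot.
asr = df[df["pipeline_tag"] == "automatic-speech-recognition"]
top = asr.sort_values("downloads", ascending=False)
print(top[["modelId", "author", "downloads", "likes"]].head(10))
```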
tomekkorbak/infallible_hawking
tomekkorbak
2022-12-13T18:41:47Z
0
1
null
[ "generated_from_trainer", "en", "dataset:tomekkorbak/detoxify-pile-chunk3-0-50000", "dataset:tomekkorbak/detoxify-pile-chunk3-50000-100000", "dataset:tomekkorbak/detoxify-pile-chunk3-100000-150000", "dataset:tomekkorbak/detoxify-pile-chunk3-150000-200000", "dataset:tomekkorbak/detoxify-pile-chunk3-200000-250000", "dataset:tomekkorbak/detoxify-pile-chunk3-250000-300000", "dataset:tomekkorbak/detoxify-pile-chunk3-300000-350000", "dataset:tomekkorbak/detoxify-pile-chunk3-350000-400000", "dataset:tomekkorbak/detoxify-pile-chunk3-400000-450000", "dataset:tomekkorbak/detoxify-pile-chunk3-450000-500000", "dataset:tomekkorbak/detoxify-pile-chunk3-500000-550000", "dataset:tomekkorbak/detoxify-pile-chunk3-550000-600000", "dataset:tomekkorbak/detoxify-pile-chunk3-600000-650000", "dataset:tomekkorbak/detoxify-pile-chunk3-650000-700000", "dataset:tomekkorbak/detoxify-pile-chunk3-700000-750000", "dataset:tomekkorbak/detoxify-pile-chunk3-750000-800000", "dataset:tomekkorbak/detoxify-pile-chunk3-800000-850000", "dataset:tomekkorbak/detoxify-pile-chunk3-850000-900000", "dataset:tomekkorbak/detoxify-pile-chunk3-900000-950000", "dataset:tomekkorbak/detoxify-pile-chunk3-950000-1000000", "dataset:tomekkorbak/detoxify-pile-chunk3-1000000-1050000", "dataset:tomekkorbak/detoxify-pile-chunk3-1050000-1100000", "dataset:tomekkorbak/detoxify-pile-chunk3-1100000-1150000", "dataset:tomekkorbak/detoxify-pile-chunk3-1150000-1200000", "dataset:tomekkorbak/detoxify-pile-chunk3-1200000-1250000", "dataset:tomekkorbak/detoxify-pile-chunk3-1250000-1300000", "dataset:tomekkorbak/detoxify-pile-chunk3-1300000-1350000", "dataset:tomekkorbak/detoxify-pile-chunk3-1350000-1400000", "dataset:tomekkorbak/detoxify-pile-chunk3-1400000-1450000", "dataset:tomekkorbak/detoxify-pile-chunk3-1450000-1500000", "dataset:tomekkorbak/detoxify-pile-chunk3-1500000-1550000", "dataset:tomekkorbak/detoxify-pile-chunk3-1550000-1600000", "dataset:tomekkorbak/detoxify-pile-chunk3-1600000-1650000", "dataset:tomekkorbak/detoxify-pile-chunk3-1650000-1700000", "dataset:tomekkorbak/detoxify-pile-chunk3-1700000-1750000", "dataset:tomekkorbak/detoxify-pile-chunk3-1750000-1800000", "dataset:tomekkorbak/detoxify-pile-chunk3-1800000-1850000", "dataset:tomekkorbak/detoxify-pile-chunk3-1850000-1900000", "dataset:tomekkorbak/detoxify-pile-chunk3-1900000-1950000", "license:mit", "region:us" ]
null
2022-12-13T18:41:40Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/detoxify-pile-chunk3-0-50000 - tomekkorbak/detoxify-pile-chunk3-50000-100000 - tomekkorbak/detoxify-pile-chunk3-100000-150000 - tomekkorbak/detoxify-pile-chunk3-150000-200000 - tomekkorbak/detoxify-pile-chunk3-200000-250000 - tomekkorbak/detoxify-pile-chunk3-250000-300000 - tomekkorbak/detoxify-pile-chunk3-300000-350000 - tomekkorbak/detoxify-pile-chunk3-350000-400000 - tomekkorbak/detoxify-pile-chunk3-400000-450000 - tomekkorbak/detoxify-pile-chunk3-450000-500000 - tomekkorbak/detoxify-pile-chunk3-500000-550000 - tomekkorbak/detoxify-pile-chunk3-550000-600000 - tomekkorbak/detoxify-pile-chunk3-600000-650000 - tomekkorbak/detoxify-pile-chunk3-650000-700000 - tomekkorbak/detoxify-pile-chunk3-700000-750000 - tomekkorbak/detoxify-pile-chunk3-750000-800000 - tomekkorbak/detoxify-pile-chunk3-800000-850000 - tomekkorbak/detoxify-pile-chunk3-850000-900000 - tomekkorbak/detoxify-pile-chunk3-900000-950000 - tomekkorbak/detoxify-pile-chunk3-950000-1000000 - tomekkorbak/detoxify-pile-chunk3-1000000-1050000 - tomekkorbak/detoxify-pile-chunk3-1050000-1100000 - tomekkorbak/detoxify-pile-chunk3-1100000-1150000 - tomekkorbak/detoxify-pile-chunk3-1150000-1200000 - tomekkorbak/detoxify-pile-chunk3-1200000-1250000 - tomekkorbak/detoxify-pile-chunk3-1250000-1300000 - tomekkorbak/detoxify-pile-chunk3-1300000-1350000 - tomekkorbak/detoxify-pile-chunk3-1350000-1400000 - tomekkorbak/detoxify-pile-chunk3-1400000-1450000 - tomekkorbak/detoxify-pile-chunk3-1450000-1500000 - tomekkorbak/detoxify-pile-chunk3-1500000-1550000 - tomekkorbak/detoxify-pile-chunk3-1550000-1600000 - tomekkorbak/detoxify-pile-chunk3-1600000-1650000 - tomekkorbak/detoxify-pile-chunk3-1650000-1700000 - tomekkorbak/detoxify-pile-chunk3-1700000-1750000 - tomekkorbak/detoxify-pile-chunk3-1750000-1800000 - tomekkorbak/detoxify-pile-chunk3-1800000-1850000 - tomekkorbak/detoxify-pile-chunk3-1850000-1900000 - tomekkorbak/detoxify-pile-chunk3-1900000-1950000 model-index: - name: infallible_hawking results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # infallible_hawking This model was trained from scratch on the 39 tomekkorbak/detoxify-pile-chunk3-* datasets, consecutive 50,000-example shards spanning indices 0 through 1950000; the full list appears in the YAML header above and in the config below.
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 25000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/detoxify-pile-chunk3-0-50000', 'tomekkorbak/detoxify-pile-chunk3-50000-100000', 'tomekkorbak/detoxify-pile-chunk3-100000-150000', 'tomekkorbak/detoxify-pile-chunk3-150000-200000', 'tomekkorbak/detoxify-pile-chunk3-200000-250000', 'tomekkorbak/detoxify-pile-chunk3-250000-300000', 'tomekkorbak/detoxify-pile-chunk3-300000-350000', 'tomekkorbak/detoxify-pile-chunk3-350000-400000', 'tomekkorbak/detoxify-pile-chunk3-400000-450000', 'tomekkorbak/detoxify-pile-chunk3-450000-500000', 'tomekkorbak/detoxify-pile-chunk3-500000-550000', 'tomekkorbak/detoxify-pile-chunk3-550000-600000', 'tomekkorbak/detoxify-pile-chunk3-600000-650000', 'tomekkorbak/detoxify-pile-chunk3-650000-700000', 'tomekkorbak/detoxify-pile-chunk3-700000-750000', 'tomekkorbak/detoxify-pile-chunk3-750000-800000', 'tomekkorbak/detoxify-pile-chunk3-800000-850000', 'tomekkorbak/detoxify-pile-chunk3-850000-900000', 'tomekkorbak/detoxify-pile-chunk3-900000-950000', 'tomekkorbak/detoxify-pile-chunk3-950000-1000000', 'tomekkorbak/detoxify-pile-chunk3-1000000-1050000', 'tomekkorbak/detoxify-pile-chunk3-1050000-1100000', 'tomekkorbak/detoxify-pile-chunk3-1100000-1150000', 'tomekkorbak/detoxify-pile-chunk3-1150000-1200000', 'tomekkorbak/detoxify-pile-chunk3-1200000-1250000', 'tomekkorbak/detoxify-pile-chunk3-1250000-1300000', 'tomekkorbak/detoxify-pile-chunk3-1300000-1350000', 'tomekkorbak/detoxify-pile-chunk3-1350000-1400000', 'tomekkorbak/detoxify-pile-chunk3-1400000-1450000', 'tomekkorbak/detoxify-pile-chunk3-1450000-1500000', 'tomekkorbak/detoxify-pile-chunk3-1500000-1550000', 'tomekkorbak/detoxify-pile-chunk3-1550000-1600000', 'tomekkorbak/detoxify-pile-chunk3-1600000-1650000', 'tomekkorbak/detoxify-pile-chunk3-1650000-1700000', 'tomekkorbak/detoxify-pile-chunk3-1700000-1750000', 'tomekkorbak/detoxify-pile-chunk3-1750000-1800000', 'tomekkorbak/detoxify-pile-chunk3-1800000-1850000', 'tomekkorbak/detoxify-pile-chunk3-1850000-1900000', 'tomekkorbak/detoxify-pile-chunk3-1900000-1950000'], 'is_split_by_sentences': True, 'skip_tokens': 1661599744}, 'generation': {'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}, {'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'challenging_rtp', 'num_samples': 2048, 'prompts_path': 'resources/challenging_rtp.jsonl'}], 'scorer_config': {'device': 'cuda:0'}}, 'kl_gpt3_callback': {'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': False, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'model_kwargs': {'revision': 'f9cb81e577effccc64697016af1e8eaf2bf5dcd2'}, 
'path_or_name': 'tomekkorbak/nervous_wozniak'}, 'objective': {'alpha': 1, 'name': 'Unlikelihood', 'score_threshold': 0.00078}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'infallible_hawking', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 'training_output104340', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25354, 'save_strategy': 'steps', 'seed': 42, 'tokens_already_seen': 1661599744, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/pr73stnd
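The full config above points at a GPT-2-style model (`'tokenizer': {'path_or_name': 'gpt2'}` plus GPT-2 config kwargs), so the checkpoint should load through the standard transformers API. A minimal sketch, assuming the repo ships the usual tokenizer and weight files; the sampling settings mirror the generation scenarios in the config above:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tomekkorbak/infallible_hawking")
model = AutoModelForCausalLM.from_pretrained("tomekkorbak/infallible_hawking")

inputs = tokenizer("The weather today is", return_tensors="pt")
# temperature/top_k/top_p taken from the card's own generation config.
outputs = model.generate(**inputs, do_sample=True, max_length=128,
                         temperature=0.7, top_k=0, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```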
gerryc/whisper-medium-ar
gerryc
2022-12-13T18:40:58Z
13
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "ar", "dataset:mozilla-foundation/common_voice_11_0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-13T00:17:01Z
--- language: - ar tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: "Whisper Medium AR - gerryc" results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: FLEURS type: google/fleurs config: ar_eg split: test args: ar metrics: - name: Wer type: wer value: 52.8 --- # Whisper Medium AR - gerryc The model was trained on the Common Voice train split; the TensorBoard evaluation runs on 256 samples of the Common Voice dev split, with no normalization or lowercasing. The model was evaluated and saved every 2,500 steps. It was overtrained and is overfitted: stopping training at roughly 2,000 to 4,000 steps gives the best checkpoint.
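A minimal transcription sketch for this checkpoint using the transformers ASR pipeline; the audio path is a placeholder, not a file from the repo:

```python
from transformers import pipeline

# Loads the fine-tuned Whisper Medium checkpoint for Arabic.
asr = pipeline("automatic-speech-recognition", model="gerryc/whisper-medium-ar")
result = asr("my_arabic_clip.wav")  # placeholder path to a local audio file
print(result["text"])
```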
ghatgetanuj/albert-large-v2_cls_sst2
ghatgetanuj
2022-12-13T17:47:06Z
5
0
transformers
[ "transformers", "pytorch", "tensorboard", "albert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-12-13T17:38:06Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: albert-large-v2_cls_sst2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # albert-large-v2_cls_sst2 This model is a fine-tuned version of [albert-large-v2](https://huggingface.co/albert-large-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3582 - Accuracy: 0.9300 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 433 | 0.3338 | 0.8933 | | 0.3977 | 2.0 | 866 | 0.2406 | 0.9197 | | 0.2954 | 3.0 | 1299 | 0.2865 | 0.9278 | | 0.2196 | 4.0 | 1732 | 0.3251 | 0.9243 | | 0.1105 | 5.0 | 2165 | 0.3582 | 0.9300 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
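A minimal inference sketch for this SST-2-style sentiment fine-tune; the example sentence is invented, and the label names come from the repo's config, so they may surface as generic LABEL_0/LABEL_1:

```python
from transformers import pipeline

clf = pipeline("text-classification", model="ghatgetanuj/albert-large-v2_cls_sst2")
print(clf("A gripping, beautifully shot film."))
# e.g. [{'label': 'LABEL_1', 'score': 0.99}] -- label mapping depends on the repo config
```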
Qiliang/bart-large-cnn-samsum-ChatGPT_v3
Qiliang
2022-12-13T17:45:10Z
9,995
31
transformers
[ "transformers", "pytorch", "bart", "text2text-generation", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2022-12-13T17:32:47Z
--- license: mit tags: - generated_from_trainer model-index: - name: bart-large-cnn-samsum-ChatGPT_v3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-cnn-samsum-ChatGPT_v3 This model is a fine-tuned version of [philschmid/bart-large-cnn-samsum](https://huggingface.co/philschmid/bart-large-cnn-samsum) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1 - Datasets 2.6.1 - Tokenizers 0.13.2
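Since the base model is a SAMSum dialogue summarizer, a minimal usage sketch via the summarization pipeline; the dialogue is invented for illustration:

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="Qiliang/bart-large-cnn-samsum-ChatGPT_v3")
dialogue = (
    "John: Did you finish the report?\n"
    "Mary: Almost, I'll send it tonight.\n"
    "John: Great, thanks!"
)
print(summarizer(dialogue)[0]["summary_text"])
```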
Pranavsk/Lunar_lander
Pranavsk
2022-12-13T17:44:50Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T17:12:01Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -95.16 +/- 16.45 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
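A minimal sketch of what the TODO block above might look like; the checkpoint filename inside the repo is an assumption, so verify it against the repo's file list before running:

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Filename is an assumption -- check the repo's files for the actual .zip name.
checkpoint = load_from_hub(repo_id="Pranavsk/Lunar_lander", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```

The same pattern applies to the other PPO LunarLander-v2 cards below, substituting each repo id.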
HikaruBear/ddpm-butterflies-128
HikaruBear
2022-12-13T17:42:11Z
1
0
diffusers
[ "diffusers", "tensorboard", "en", "dataset:huggan/smithsonian_butterflies_subset", "license:apache-2.0", "diffusers:DDPMPipeline", "region:us" ]
null
2022-12-13T05:45:21Z
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/HikaruBear/ddpm-butterflies-128/tensorboard?#scalars)
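A minimal sketch for the TODO snippet above, following the same diffusers API the sd-class butterfly cards elsewhere in this dump already use:

```python
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained("HikaruBear/ddpm-butterflies-128")
image = pipeline().images[0]  # runs the full DDPM denoising loop once
image.save("butterfly.png")
```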
ghatgetanuj/albert-large-v2_cls_CR
ghatgetanuj
2022-12-13T17:35:22Z
13
0
transformers
[ "transformers", "pytorch", "tensorboard", "albert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-12-13T17:25:56Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: albert-large-v2_cls_CR results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # albert-large-v2_cls_CR This model is a fine-tuned version of [albert-large-v2](https://huggingface.co/albert-large-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.6549 - Accuracy: 0.6383 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 213 | 0.3524 | 0.8803 | | No log | 2.0 | 426 | 0.6839 | 0.6383 | | 0.5671 | 3.0 | 639 | 0.6622 | 0.6383 | | 0.5671 | 4.0 | 852 | 0.6549 | 0.6383 | | 0.6652 | 5.0 | 1065 | 0.6549 | 0.6383 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
vadimMidav/sd-class-butterflies-64
vadimMidav
2022-12-13T17:34:55Z
1
0
diffusers
[ "diffusers", "pytorch", "unconditional-image-generation", "diffusion-models-class", "license:mit", "diffusers:DDPMPipeline", "region:us" ]
unconditional-image-generation
2022-12-13T17:34:31Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('vadimMidav/sd-class-butterflies-64') image = pipeline().images[0] image ```
jonatasgrosman/whisper-small-pt-cv11-v6
jonatasgrosman
2022-12-13T17:33:16Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "pt", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-13T05:16:14Z
--- language: - pt license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Portuguese results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 pt type: mozilla-foundation/common_voice_11_0 config: pt split: test args: pt metrics: - name: Wer type: wer value: 11.972265023112481 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Portuguese This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 pt dataset. It achieves the following results on the evaluation set: - Loss: 0.2738 - Wer: 11.9723 - Cer: 4.8273 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | Cer | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:| | 0.5235 | 0.92 | 500 | 0.3605 | 15.4083 | 6.1205 | | 0.3839 | 1.84 | 1000 | 0.3034 | 14.2835 | 5.6010 | | 0.2828 | 2.76 | 1500 | 0.2852 | 13.4977 | 5.1727 | | 0.2367 | 3.68 | 2000 | 0.2768 | 12.9122 | 5.2280 | | 0.1832 | 4.6 | 2500 | 0.2728 | 12.2496 | 4.9157 | | 0.1549 | 5.52 | 3000 | 0.2730 | 12.0647 | 4.8384 | | 0.1318 | 6.45 | 3500 | 0.2757 | 12.0955 | 4.8135 | | 0.1077 | 7.37 | 4000 | 0.2738 | 11.9723 | 4.8273 | | 0.0969 | 8.29 | 4500 | 0.2784 | 12.1572 | 4.9212 | | 0.0813 | 9.21 | 5000 | 0.2805 | 12.3112 | 5.0207 | | 0.0751 | 10.13 | 5500 | 0.2831 | 12.0801 | 4.8494 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.1+cu116 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
bharat-raghunathan/whisper-tiny-ta
bharat-raghunathan
2022-12-13T17:16:32Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "ta", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-13T03:17:15Z
--- language: - ta license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: Whisper Tiny Tamil - Bharat Raghunathan results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Tiny Tamil - Bharat Raghunathan This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the Common Voice 11.0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 10 - training_steps: 500 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
bjubert/10_epochs_camembert_jb
bjubert
2022-12-13T17:01:45Z
14
0
transformers
[ "transformers", "pytorch", "tensorboard", "camembert", "token-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-12-13T11:52:42Z
--- license: mit tags: - generated_from_trainer model-index: - name: 10_epochs_camembert_jb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 10_epochs_camembert_jb This model is a fine-tuned version of [Jean-Baptiste/camembert-ner](https://huggingface.co/Jean-Baptiste/camembert-ner) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1070 - Overall Precision: 0.8279 - Overall Recall: 0.8660 - Overall F1: 0.8465 - Overall Accuracy: 0.9803 - Er F1: 0.8617 - Oc F1: 0.8347 - Umanprod F1: 0.7297 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | Er F1 | Oc F1 | Umanprod F1 | |:-------------:|:-----:|:----:|:---------------:|:-----------------:|:--------------:|:----------:|:----------------:|:------:|:------:|:-----------:| | 0.2805 | 1.0 | 613 | 0.0797 | 0.7802 | 0.7990 | 0.7895 | 0.9749 | 0.8187 | 0.7677 | 0.4231 | | 0.072 | 2.0 | 1226 | 0.0790 | 0.8060 | 0.8392 | 0.8223 | 0.9773 | 0.8458 | 0.8050 | 0.5574 | | 0.0511 | 3.0 | 1839 | 0.0807 | 0.8139 | 0.8623 | 0.8374 | 0.9789 | 0.8583 | 0.8200 | 0.6933 | | 0.0354 | 4.0 | 2452 | 0.0808 | 0.8097 | 0.8574 | 0.8329 | 0.9793 | 0.8589 | 0.8115 | 0.6667 | | 0.0198 | 5.0 | 3065 | 0.0940 | 0.7936 | 0.8591 | 0.8250 | 0.9781 | 0.8426 | 0.8124 | 0.6835 | | 0.0165 | 6.0 | 3678 | 0.0988 | 0.8350 | 0.8542 | 0.8445 | 0.9802 | 0.8656 | 0.8297 | 0.6486 | | 0.0126 | 7.0 | 4291 | 0.0990 | 0.8292 | 0.8692 | 0.8488 | 0.9805 | 0.8682 | 0.8340 | 0.6849 | | 0.0103 | 8.0 | 4904 | 0.1042 | 0.8246 | 0.8666 | 0.8450 | 0.9803 | 0.8630 | 0.8331 | 0.6575 | | 0.0076 | 9.0 | 5517 | 0.1066 | 0.8195 | 0.8687 | 0.8434 | 0.9801 | 0.8593 | 0.8305 | 0.7297 | | 0.0066 | 10.0 | 6130 | 0.1070 | 0.8279 | 0.8660 | 0.8465 | 0.9803 | 0.8617 | 0.8347 | 0.7297 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cpu - Datasets 2.7.1 - Tokenizers 0.13.2
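A minimal inference sketch for this French NER fine-tune; the input sentence is invented, and the entity types (Er, Oc, Umanprod in the card's metrics) come from the repo's label config:

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="bjubert/10_epochs_camembert_jb",
    aggregation_strategy="simple",  # merge sub-word tokens into whole entities
)
print(ner("Le château de Versailles a été construit sous Louis XIV."))
```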
shripadbhat/whisper-medium-sl
shripadbhat
2022-12-13T17:01:41Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "sl", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-13T15:41:32Z
--- language: - sl license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Medium Slovenian results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: sl split: test args: sl metrics: - name: Wer type: wer value: 16.80513044745664 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Medium Slovenian This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.2653 - Wer: 16.8051 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - training_steps: 400 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.161 | 1.33 | 100 | 0.2516 | 21.6149 | | 0.0386 | 2.66 | 200 | 0.2476 | 18.5979 | | 0.0161 | 3.99 | 300 | 0.2491 | 17.1841 | | 0.0032 | 5.33 | 400 | 0.2653 | 16.8051 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
parambharat/whisper-small-ta
parambharat
2022-12-13T16:57:13Z
19
1
transformers
[ "transformers", "pytorch", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "ta", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-11T13:58:54Z
--- language: - ta license: apache-2.0 tags: - whisper-event - generated_from_trainer metrics: - wer model-index: - name: Whisper Small Ta - Bharat Ramanathan results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: google/fleurs type: google/fleurs config: ta_in split: test metrics: - type: wer value: 15.8 name: WER - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: mozilla-foundation/common_voice_11_0 type: mozilla-foundation/common_voice_11_0 config: ta split: test metrics: - type: wer value: 11.15 name: WER --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Ta - Bharat Ramanathan This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1803 - Wer: 17.1456 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.3374 | 0.1 | 500 | 0.2579 | 23.3804 | | 0.29 | 0.2 | 1000 | 0.2260 | 20.9937 | | 0.2522 | 0.3 | 1500 | 0.2139 | 20.0682 | | 0.2338 | 0.4 | 2000 | 0.2025 | 19.6785 | | 0.223 | 0.5 | 2500 | 0.1979 | 18.3147 | | 0.211 | 0.6 | 3000 | 0.1927 | 17.8276 | | 0.2032 | 0.7 | 3500 | 0.1865 | 17.3892 | | 0.1978 | 0.8 | 4000 | 0.1839 | 17.5353 | | 0.1972 | 0.9 | 4500 | 0.1812 | 17.0969 | | 0.1894 | 1.0 | 5000 | 0.1803 | 17.1456 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
tarteel-ai/whisper-base-ar-quran
tarteel-ai
2022-12-13T16:49:54Z
3,792
33
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-08T21:04:00Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-base-ar-quran results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-base-ar-quran This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0839 - Wer: 5.7544 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - total_train_batch_size: 128 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.1092 | 0.05 | 250 | 0.1969 | 13.3890 | | 0.0361 | 0.1 | 500 | 0.1583 | 10.6375 | | 0.0192 | 0.15 | 750 | 0.1109 | 8.8468 | | 0.0144 | 0.2 | 1000 | 0.1157 | 7.9754 | | 0.008 | 0.25 | 1250 | 0.1000 | 7.5360 | | 0.0048 | 1.03 | 1500 | 0.0933 | 6.8227 | | 0.0113 | 1.08 | 1750 | 0.0955 | 6.9638 | | 0.0209 | 1.13 | 2000 | 0.0824 | 6.3586 | | 0.0043 | 1.18 | 2250 | 0.0830 | 6.3444 | | 0.002 | 1.23 | 2500 | 0.1015 | 6.3025 | | 0.0013 | 2.01 | 2750 | 0.0863 | 6.0639 | | 0.0014 | 2.06 | 3000 | 0.0905 | 6.0213 | | 0.0018 | 2.11 | 3250 | 0.0864 | 6.0293 | | 0.0008 | 2.16 | 3500 | 0.0887 | 5.9308 | | 0.0029 | 2.21 | 3750 | 0.0777 | 5.9159 | | 0.0022 | 2.26 | 4000 | 0.0847 | 5.8749 | | 0.0005 | 3.05 | 4250 | 0.0827 | 5.8352 | | 0.0003 | 3.1 | 4500 | 0.0826 | 5.7800 | | 0.0006 | 3.15 | 4750 | 0.0833 | 5.7625 | | 0.0003 | 3.2 | 5000 | 0.0839 | 5.7544 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
Arch4ngel/q-Taxi-v3
Arch4ngel
2022-12-13T16:48:37Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T16:48:31Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="Arch4ngel/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
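For a runnable version of the snippet above: `load_from_hub` is the helper from the Deep RL course notebook, re-sketched here with `huggingface_hub`; the `"qtable"` key is an assumption about the pickled dict:

```python
import pickle

import gym
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    # Downloads and unpickles the saved agent (course-notebook helper, re-sketched).
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="Arch4ngel/q-Taxi-v3", filename="q-learning.pkl")
env = gym.make(model["env_id"])

state = env.reset()
done = False
while not done:
    action = model["qtable"][state].argmax()  # greedy action; key name assumed
    state, reward, done, info = env.step(action)
```

The same pattern applies to the other Q-Learning cards below (the FrozenLake-v1 repos may additionally need `is_slippery=False` passed to `gym.make`).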
lewtun/setfit-minilm-distilled
lewtun
2022-12-13T16:45:33Z
6
0
setfit
[ "setfit", "pytorch", "bert", "endpoints-template", "text-classification", "sentence-similarity", "endpoints_compatible", "region:us" ]
sentence-similarity
2022-12-13T15:02:02Z
--- pipeline_tag: sentence-similarity tags: - setfit - endpoints-template - text-classification inference: false --- # lewtun/setfit-minilm-distilled This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('lewtun/setfit-minilm-distilled') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch # Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] # First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('lewtun/setfit-minilm-distilled') model = AutoModel.from_pretrained('lewtun/setfit-minilm-distilled') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=lewtun/setfit-minilm-distilled) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 2500 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 2500, "warmup_steps": 250, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
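Since the repo is tagged `setfit`, the distilled classifier can presumably also be loaded through the SetFit API directly; a minimal sketch, with example inputs invented here:

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("lewtun/setfit-minilm-distilled")
preds = model.predict(["i loved the spiderman movie!", "pesky deer ruined my garden"])
print(preds)
```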
Arch4ngel/q-FrozenLake-v1-4x4-noSlippery
Arch4ngel
2022-12-13T16:37:03Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T16:31:06Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="Arch4ngel/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
vadimMidav/sd-class-butterflies-32
vadimMidav
2022-12-13T16:34:57Z
0
0
diffusers
[ "diffusers", "pytorch", "unconditional-image-generation", "diffusion-models-class", "license:mit", "diffusers:DDPMPipeline", "region:us" ]
unconditional-image-generation
2022-12-13T16:34:34Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('vadimMidav/sd-class-butterflies-32') image = pipeline().images[0] image ```
DanGalt/q-FrozenLake-v1-4x4-noSlippery
DanGalt
2022-12-13T16:33:52Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T16:33:41Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="DanGalt/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
soschuetze/disilbert-blm-tweets-binary
soschuetze
2022-12-13T16:24:19Z
5
0
transformers
[ "transformers", "tf", "distilbert", "text-classification", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-12-13T16:24:00Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: disilbert-blm-tweets-binary results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # disilbert-blm-tweets-binary This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.1159 - Train Accuracy: 0.9556 - Validation Loss: 0.5772 - Validation Accuracy: 0.7965 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 5e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.5941 | 0.6905 | 0.5159 | 0.7168 | 0 | | 0.4041 | 0.8212 | 0.4589 | 0.8142 | 1 | | 0.2491 | 0.9026 | 0.6014 | 0.7876 | 2 | | 0.1011 | 0.9692 | 0.7181 | 0.8053 | 3 | | 0.1159 | 0.9556 | 0.5772 | 0.7965 | 4 | ### Framework versions - Transformers 4.25.1 - TensorFlow 2.9.2 - Tokenizers 0.13.2
Atom007/content-gdrive-mydrive-fast-dreambooth-sessions-shrikant-shrikant-ckpt
Atom007
2022-12-13T15:53:12Z
2
0
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2022-12-13T15:48:09Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### /content/gdrive/MyDrive/Fast-Dreambooth/Sessions/shrikant/shrikant.ckpt Dreambooth model trained by Atom007 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept:
hizak/ppo-LunarLander-v2
hizak
2022-12-13T15:33:22Z
1
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T15:32:55Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 238.35 +/- 20.92 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
zbenmo/QLearning-Taxi-v3
zbenmo
2022-12-13T15:26:39Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T15:26:33Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: QLearning-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="zbenmo/QLearning-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Yilin98/whisper-small-hi
Yilin98
2022-12-13T15:20:23Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "hf-asr-leaderboard", "generated_from_trainer", "sv", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-06T21:03:02Z
--- language: - sv license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Swedish results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: sv-SE split: test args: 'config: sv, split: test' metrics: - name: Wer type: wer value: 19.942996961630502 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Swedish This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.3442 - Wer: 19.9430 Check [here](https://drive.google.com/file/d/10Nd0rMnLM5yEpMhI26sHVpQGcSmv2t2X/view?usp=sharing) for the result of checkpoint-4000 ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
ziemke/ppo-Huggy
ziemke
2022-12-13T15:08:46Z
6
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "unity-ml-agents", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
2022-12-13T15:08:30Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The documentation is at https://github.com/huggingface/ml-agents#get-started, along with a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Write your model_id: ziemke/ppo-Huggy 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
ilyaster-rl/q-FrozenLake-v1-4x4-noSlippery
ilyaster-rl
2022-12-13T14:57:05Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T14:56:54Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="ilyaster-rl/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
alicjak/Reinforce-Pixelcopter-PLE-v0
alicjak
2022-12-13T14:55:43Z
0
0
null
[ "Pixelcopter-PLE-v0", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T14:55:35Z
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Pixelcopter-PLE-v0 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 15.10 +/- 14.30 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn to use this model and train yours, check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
JuanCadavid/t5-small-finetuned-NL2ModelioMQ-EN
JuanCadavid
2022-12-13T14:48:14Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "dataset:generator", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2022-12-13T14:14:42Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - generator model-index: - name: t5-small-finetuned-NL2ModelioMQ-EN results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-NL2ModelioMQ-EN This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the generator dataset. It achieves the following results on the evaluation set: - Loss: 0.0000 - Rouge2 Precision: 0.9789 - Rouge2 Recall: 0.6055 - Rouge2 Fmeasure: 0.7295 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge2 Precision | Rouge2 Recall | Rouge2 Fmeasure | |:-------------:|:-----:|:-----:|:---------------:|:----------------:|:-------------:|:---------------:| | 0.0107 | 1.0 | 4449 | 0.0006 | 0.9688 | 0.6005 | 0.7229 | | 0.0022 | 2.0 | 8898 | 0.0001 | 0.9787 | 0.6054 | 0.7294 | | 0.001 | 3.0 | 13347 | 0.0000 | 0.9789 | 0.6055 | 0.7295 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
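Judging by the name, this model maps English natural-language requests to ModelioMQ queries; a minimal inference sketch with an invented prompt, since the card does not show the expected input format:

```python
from transformers import pipeline

nl2query = pipeline("text2text-generation", model="JuanCadavid/t5-small-finetuned-NL2ModelioMQ-EN")
result = nl2query("Select all classes whose name contains 'Order'.")
print(result[0]["generated_text"])
```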
rwheel/ppo-LunarLander-v2
rwheel
2022-12-13T14:30:58Z
5
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-12T15:06:41Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 280.15 +/- 18.85 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
armargolis/LunarLander-v2
armargolis
2022-12-13T14:29:57Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T14:29:33Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 254.73 +/- 16.36 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
ibadrehman/lunarlander-v2-1
ibadrehman
2022-12-13T14:23:16Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T14:22:50Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 281.03 +/- 18.68 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption, so check the repo's file list for the exact name: ```python from huggingface_sb3 import load_from_hub from stable_baselines3 import PPO # The filename below is assumed, not confirmed by this card checkpoint = load_from_hub(repo_id="ibadrehman/lunarlander-v2-1", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
Stokrotka/ppo-LunarLander-v2
Stokrotka
2022-12-13T13:38:40Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T13:38:16Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 268.84 +/- 17.73 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption, so check the repo's file list for the exact name: ```python from huggingface_sb3 import load_from_hub from stable_baselines3 import PPO # The filename below is assumed, not confirmed by this card checkpoint = load_from_hub(repo_id="Stokrotka/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
alicjak/Reinforce-CartPole1
alicjak
2022-12-13T13:27:56Z
0
0
null
[ "CartPole-v1", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T12:23:13Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 134.40 +/- 22.20 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
zp2222/ddpm-butterflies-128
zp2222
2022-12-13T13:20:51Z
1
0
diffusers
[ "diffusers", "tensorboard", "en", "dataset:huggan/smithsonian_butterflies_subset", "license:apache-2.0", "diffusers:DDPMPipeline", "region:us" ]
null
2022-10-24T03:16:59Z
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use A minimal sampling sketch with 🤗 Diffusers: ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained("zp2222/ddpm-butterflies-128") image = pipeline().images[0] image.save("butterfly.png") ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/zp2222/ddpm-butterflies-128/tensorboard?#scalars)
Clawoo/q-Taxi-v3
Clawoo
2022-12-13T12:52:19Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T12:43:32Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL course notebook model = load_from_hub(repo_id="Clawoo/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Clawoo/q-FrozenLake-v1-4x4-noSlippery
Clawoo
2022-12-13T12:38:57Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T12:31:04Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL course notebook model = load_from_hub(repo_id="Clawoo/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
darkVOYAGE/dvMJv4
darkVOYAGE
2022-12-13T11:54:04Z
0
4
null
[ "text-to-image", "stable-diffusion", "MJv4", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2022-12-12T17:24:05Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion - MJv4 --- The second version ("v2") of a custom-tuned Stable Diffusion model trained on MJ (Midjourney) v4 images. Built on SD 1.5. Use it by including "dvMJv4" or "dvMJv4 style" towards the beginning of your prompt. Sample pictures of this concept: ![MJv4_v2_Thumb.jpg](https://s3.amazonaws.com/moonup/production/uploads/1670867784183-6331c100acb6472115ae666a.jpeg)
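A minimal text-to-image sketch for the card above, assuming the repo ships diffusers-format weights (it may only contain a single checkpoint file, in which case it must be converted first):
```python
import torch
from diffusers import StableDiffusionPipeline

# Assumes diffusers-format weights exist in the repo; not confirmed by the card
pipe = StableDiffusionPipeline.from_pretrained("darkVOYAGE/dvMJv4", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# The card says to put the trigger token near the start of the prompt
image = pipe("dvMJv4 style, portrait of an astronaut in a flower field").images[0]
image.save("dvMJv4_sample.png")
```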
ai-project/wav2vec2-xlsr-large-vi-aiclass-20221-group-8
ai-project
2022-12-13T11:53:38Z
8
0
transformers
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "dataset:common_voice", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-11-26T20:40:38Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-vi-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-vi-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 3.2659 - Wer: 0.7160 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 5.3934 | 3.45 | 400 | 3.4806 | 1.0 | | 2.3392 | 6.9 | 800 | 2.1210 | 0.9011 | | 1.1786 | 10.34 | 1200 | 2.4091 | 0.7807 | | 0.779 | 13.79 | 1600 | 2.7128 | 0.7621 | | 0.5645 | 17.24 | 2000 | 3.0103 | 0.7428 | | 0.4329 | 20.69 | 2400 | 3.0804 | 0.7219 | | 0.3455 | 24.14 | 2800 | 3.1075 | 0.7190 | | 0.2803 | 27.59 | 3200 | 3.2659 | 0.7160 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
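A minimal transcription sketch for the Vietnamese ASR card above; the audio path is a placeholder, and 16 kHz mono input is an assumption based on typical wav2vec2 fine-tunes:
```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="ai-project/wav2vec2-xlsr-large-vi-aiclass-20221-group-8",
)
# "sample.wav" is a placeholder for a 16 kHz mono recording
print(asr("sample.wav")["text"])
```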
chieunq/xlm-r-base-uit-viquad
chieunq
2022-12-13T11:53:01Z
50
1
transformers
[ "transformers", "pytorch", "xlm-roberta", "question-answering", "vi", "dataset:uit-viquad", "arxiv:2009.14725", "endpoints_compatible", "region:us" ]
question-answering
2022-12-04T11:47:28Z
--- language: vi tags: - vi - xlm-roberta widget: - text: 3 thành viên trong nhóm gồm những ai ? context: "Nhóm của chúng tôi là sinh viên năm 4 trường ĐH Công Nghệ - ĐHQG Hà Nội. Nhóm gồm 3 thành viên: Nguyễn Quang Chiều, Nguyễn Quang Huy và Nguyễn Trần Anh Đức . Đây là pha Reader trong dự án cuồi kì môn Các vấn đề hiện đại trong CNTT của nhóm ." datasets: - uit-viquad metrics: - EM (exact match) : 60.63 - F1 : 79.63 --- We fine-tuned the XLM-RoBERTa-base model on the UIT-ViQuAD dataset (https://arxiv.org/pdf/2009.14725.pdf) together with a data augmentation technique. ### Performance - EM (exact match) : 65.63 - F1 : 85.63 ### How to run ```python from transformers import pipeline # Replace this with your own checkpoint model_checkpoint = "chieunq/xlm-r-base-uit-viquad" question_answerer = pipeline("question-answering", model=model_checkpoint) context = """ Nhóm của chúng tôi là sinh viên năm 4 trường ĐH Công Nghệ - ĐHQG Hà Nội. Nhóm gồm 3 thành viên : Nguyễn Quang Chiều, Nguyễn Quang Huy và Nguyễn Trần Anh Đức . Đây là pha Reader trong dự án cuồi kì môn Các vấn đề hiện đại trong CNTT của nhóm . """ question = "3 thành viên trong nhóm gồm những ai ?" question_answerer(question=question, context=context) ``` ### Output ``` {'score': 0.9928902387619019, 'start': 98, 'end': 158, 'answer': 'Nguyễn Quang Chiều, Nguyễn Quang Huy và Nguyễn Trần Anh Đức.'} ``` ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.7.0 - Tokenizers 0.13.2
plegg/ppo-LunarLander-v2-1000000iter
plegg
2022-12-13T11:49:17Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T11:19:27Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 280.02 +/- 20.73 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption, so check the repo's file list for the exact name: ```python from huggingface_sb3 import load_from_hub from stable_baselines3 import PPO # The filename below is assumed, not confirmed by this card checkpoint = load_from_hub(repo_id="plegg/ppo-LunarLander-v2-1000000iter", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
giladrefael/PPO_test
giladrefael
2022-12-13T11:47:46Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T11:47:24Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -146.98 +/- 35.23 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption, so check the repo's file list for the exact name: ```python from huggingface_sb3 import load_from_hub from stable_baselines3 import PPO # The filename below is assumed, not confirmed by this card checkpoint = load_from_hub(repo_id="giladrefael/PPO_test", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
DeividasM/whisper-medium-lt
DeividasM
2022-12-13T11:34:30Z
18
1
transformers
[ "transformers", "pytorch", "whisper", "automatic-speech-recognition", "whisper-event", "hf-asr-leaderboard", "generated_from_trainer", "lt", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-07T20:40:30Z
--- language: - lt license: apache-2.0 tags: - whisper-event - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Medium Lithuanian results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 type: mozilla-foundation/common_voice_11_0 args: 'config: lt, split: test' metrics: - name: Wer type: wer value: 20.446244 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Medium Lithuanian CV11 This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the mozilla-foundation/common_voice_11_0 lt dataset. It achieves the following results on the evaluation set: - Loss: 0.354951 - Wer: 20.446244 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0056 | 9.42 | 1000 | 0.3252 | 20.5534 | | 0.0023 | 18.8 | 2000 | 0.3549 | 20.4462 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
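A minimal sketch for transcribing Lithuanian with the card above; forcing the decoder prompt is an assumption, since the fine-tune may already default to Lithuanian:
```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="DeividasM/whisper-medium-lt")

# Forcing language/task is an assumption; the fine-tuned model may not need it
asr.model.config.forced_decoder_ids = asr.tokenizer.get_decoder_prompt_ids(
    language="lithuanian", task="transcribe"
)
print(asr("sample.wav")["text"])  # "sample.wav" is a placeholder audio file
```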
marifulhaque/wav2vec2-teacher-student-en-asr-timit
marifulhaque
2022-12-13T11:15:31Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-13T03:00:10Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-teacher-student-en-asr-timit results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-teacher-student-en-asr-timit This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - eval_loss: 0.2592 - eval_wer: 0.3344 - eval_runtime: 79.9525 - eval_samples_per_second: 21.012 - eval_steps_per_second: 2.627 - epoch: 29.12 - step: 14500 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 32 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 1.18.3 - Tokenizers 0.13.2
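A lower-level decoding sketch for the card above; 16 kHz mono input and the presence of a processor in the repo are assumptions:
```python
import torch
import torchaudio
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

model_id = "marifulhaque/wav2vec2-teacher-student-en-asr-timit"
processor = Wav2Vec2Processor.from_pretrained(model_id)  # assumes the repo ships a processor
model = Wav2Vec2ForCTC.from_pretrained(model_id)

speech, sample_rate = torchaudio.load("sample.wav")  # placeholder; expects 16 kHz mono
inputs = processor(speech.squeeze(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits

# Greedy CTC decode
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```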
GV05/xlm-roberta-base-finetuned-panx-de
GV05
2022-12-13T11:11:54Z
6
0
transformers
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-11-10T11:06:47Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.de split: train args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8638300289723342 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1358 - F1: 0.8638 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2591 | 1.0 | 525 | 0.1621 | 0.8206 | | 0.1276 | 2.0 | 1050 | 0.1379 | 0.8486 | | 0.082 | 3.0 | 1575 | 0.1358 | 0.8638 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
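A minimal NER sketch for the PAN-X German card above; the example sentence is illustrative only:
```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="GV05/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",  # merge word pieces into whole entity spans
)
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```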
sryu1/Taxi
sryu1
2022-12-13T10:54:34Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T10:50:18Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL course notebook model = load_from_hub(repo_id="sryu1/Taxi", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
alicjak/testpyramidsrnd
alicjak
2022-12-13T10:44:42Z
3
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "unity-ml-agents", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
2022-12-13T10:44:36Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids library_name: ml-agents --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Write your model_id: alicjak/testpyramidsrnd 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
kpriyanshu256/whisper-medium-as-600-32-1e-05-bn
kpriyanshu256
2022-12-13T10:21:05Z
6
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "as", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-13T02:55:23Z
--- language: - as license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: openai/whisper-medium-Assamese results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: as split: test args: as metrics: - name: Wer type: wer value: 58.36491608012994 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # openai/whisper-medium-Assamese This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 1.0992 - Wer: 58.3649 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 2 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - training_steps: 600 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0841 | 1.13 | 600 | 1.0992 | 58.3649 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
DimiNim/q-taxi-v3
DimiNim
2022-12-13T10:20:33Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T10:20:30Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.54 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL course notebook model = load_from_hub(repo_id="DimiNim/q-taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
sumups-ai/results
sumups-ai
2022-12-13T10:11:19Z
14
0
transformers
[ "transformers", "pytorch", "tensorboard", "bert", "token-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-12-12T11:31:15Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: results results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # results This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.4578 - Precision: 0.0060 - Recall: 0.0286 - F1: 0.0099 - Accuracy: 0.4288 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 8 | 1.6449 | 0.0 | 0.0 | 0.0 | 0.3860 | | No log | 2.0 | 16 | 1.5439 | 0.0014 | 0.0071 | 0.0023 | 0.4025 | | No log | 3.0 | 24 | 1.4986 | 0.0068 | 0.0286 | 0.0110 | 0.4176 | | No log | 4.0 | 32 | 1.4603 | 0.0033 | 0.0143 | 0.0054 | 0.4285 | | No log | 5.0 | 40 | 1.4578 | 0.0060 | 0.0286 | 0.0099 | 0.4288 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
Dharkelf/Taxi-v3_2
Dharkelf
2022-12-13T09:52:03Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T09:51:52Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi-v3_2 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL course notebook model = load_from_hub(repo_id="Dharkelf/Taxi-v3_2", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
N00NE21483/model-trained-with-me
N00NE21483
2022-12-13T09:50:43Z
1
0
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2022-12-13T09:35:44Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Model-trained-with-me Dreambooth model trained by N00NE21483 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept:
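A minimal inference sketch for the DreamBooth card above; the trigger phrase in the prompt is a guess, since the card does not document the concept token:
```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "N00NE21483/model-trained-with-me", torch_dtype=torch.float16
).to("cuda")

# The trigger phrase below is a guess; check the repo for the actual concept name
image = pipe("a photo of model-trained-with-me person, studio lighting").images[0]
image.save("dreambooth_sample.png")
```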
vinowan/ddpm-butterflies-128
vinowan
2022-12-13T09:49:27Z
0
0
diffusers
[ "diffusers", "tensorboard", "en", "dataset:huggan/smithsonian_butterflies_subset", "license:apache-2.0", "diffusers:DDPMPipeline", "region:us" ]
null
2022-12-13T07:57:28Z
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use A minimal sampling sketch with 🤗 Diffusers: ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained("vinowan/ddpm-butterflies-128") image = pipeline().images[0] image.save("butterfly.png") ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/vinowan/ddpm-butterflies-128/tensorboard?#scalars)
cyk1337/ddpm-butterflies-128
cyk1337
2022-12-13T09:36:34Z
6
0
diffusers
[ "diffusers", "tensorboard", "en", "dataset:huggan/smithsonian_butterflies_subset", "license:apache-2.0", "diffusers:DDPMPipeline", "region:us" ]
null
2022-12-10T04:41:45Z
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use A minimal sampling sketch with 🤗 Diffusers: ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained("cyk1337/ddpm-butterflies-128") image = pipeline().images[0] image.save("butterfly.png") ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/cyk1337/ddpm-butterflies-128/tensorboard?#scalars)
Dharkelf/q-FrozenLake-v1-4x4-noSlippery
Dharkelf
2022-12-13T09:35:49Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T09:35:38Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL course notebook model = load_from_hub(repo_id="Dharkelf/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
WimStraetemans/q-FrozenLake-v1-4x4-noSlippery
WimStraetemans
2022-12-13T09:11:36Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T09:11:31Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL course notebook model = load_from_hub(repo_id="WimStraetemans/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
PeskyAmiable/pepe
PeskyAmiable
2022-12-13T08:58:37Z
0
0
keras
[ "keras", "tf-keras", "image-classification", "region:us" ]
image-classification
2022-12-04T08:38:23Z
--- tags: - image-classification - keras library_name: keras ---
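A minimal loading sketch for the Keras card above; input shape and class labels are undocumented, so only loading and inspecting the model is shown:
```python
from huggingface_hub import from_pretrained_keras

# Input size and class labels are not documented in the card
model = from_pretrained_keras("PeskyAmiable/pepe")
model.summary()
```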
chizhikchi/cares-biobert-base
chizhikchi
2022-12-13T08:54:37Z
11
1
transformers
[ "transformers", "pytorch", "bert", "text-classification", "dataset:chizhikchi/CARES", "autotrain_compatible", "region:us" ]
text-classification
2022-09-13T07:44:13Z
--- datasets: - chizhikchi/CARES inference: false ---
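The card above disables the hosted inference widget, so a manual sketch; single-label softmax decoding and the Spanish example are assumptions (CARES is a Spanish clinical corpus, and the task may be multi-label):
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "chizhikchi/cares-biobert-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# Single-label decoding is an assumption; check the dataset card for the label scheme
inputs = tokenizer("TC craneal sin hallazgos significativos.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```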
parambharat/whisper-base-ta
parambharat
2022-12-13T08:54:06Z
8
0
transformers
[ "transformers", "pytorch", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "ta", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-07T19:04:05Z
--- language: - ta license: apache-2.0 tags: - whisper-event - generated_from_trainer metrics: - wer model-index: - name: Whisper Base Ta - Bharat Ramanathan results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: mozilla-foundation/common_voice_11_0 type: mozilla-foundation/common_voice_11_0 config: ta split: test metrics: - type: wer value: 15.78 name: WER - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: google/fleurs type: google/fleurs config: ta_in split: test metrics: - type: wer value: 20.41 name: WER --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Base Ta - Bharat Ramanathan This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2269 - Wer: 21.7243 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:-------:| | 0.5559 | 0.1 | 1000 | 0.3963 | 35.3308 | | 0.3891 | 0.2 | 2000 | 0.3146 | 29.1511 | | 0.3425 | 0.3 | 3000 | 0.2834 | 25.5930 | | 0.3108 | 0.1 | 4000 | 0.2669 | 24.7191 | | 0.2866 | 0.1 | 5000 | 0.2596 | 25.0936 | | 0.2697 | 0.2 | 6000 | 0.2507 | 24.5943 | | 0.2421 | 0.05 | 6500 | 0.2411 | 23.0395 | | 0.2425 | 0.1 | 7000 | 0.2370 | 23.3804 | | 0.2404 | 0.15 | 7500 | 0.2333 | 22.7959 | | 0.2381 | 0.2 | 8000 | 0.2311 | 22.9420 | | 0.2429 | 0.25 | 8500 | 0.2305 | 22.0166 | | 0.2402 | 0.3 | 9000 | 0.2284 | 22.1140 | | 0.2377 | 0.35 | 9500 | 0.2271 | 22.0653 | | 0.2389 | 0.4 | 10000 | 0.2269 | 21.7243 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
chizhikchi/cares-roberta-bne
chizhikchi
2022-12-13T08:53:00Z
6
1
transformers
[ "transformers", "pytorch", "roberta", "text-classification", "dataset:chizhikchi/CARES", "autotrain_compatible", "region:us" ]
text-classification
2022-12-02T08:49:17Z
--- datasets: - chizhikchi/CARES inference: false ---
leoleung93/q-FrozenLake-v1-4x4-noSlippery
leoleung93
2022-12-13T08:48:15Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T08:47:11Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym # load_from_hub is the helper defined in the Deep RL course notebook model = load_from_hub(repo_id="leoleung93/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
haroonrahimi/wav2vec2-large-xls-r-300m-pu-colab
haroonrahimi
2022-12-13T08:37:42Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "dataset:common_voice", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-13T08:04:27Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-pu-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-pu-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
thkkvui/xlm-roberta-base-finetuned-panx-all
thkkvui
2022-12-13T08:29:16Z
7
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-12-13T07:29:37Z
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-all results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-all This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1737 - F1: 0.8521 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.305 | 1.0 | 835 | 0.1944 | 0.7968 | | 0.1569 | 2.0 | 1670 | 0.1759 | 0.8395 | | 0.1027 | 3.0 | 2505 | 0.1737 | 0.8521 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.13.0.dev20220711 - Datasets 2.4.0 - Tokenizers 0.12.1
tkazusa/lilt-en-funsd-org
tkazusa
2022-12-13T08:09:01Z
9
0
transformers
[ "transformers", "pytorch", "tensorboard", "lilt", "token-classification", "generated_from_trainer", "dataset:funsd-layoutlmv3", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-12-13T08:04:48Z
--- license: mit tags: - generated_from_trainer datasets: - funsd-layoutlmv3 model-index: - name: lilt-en-funsd-org results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # lilt-en-funsd-org This model is a fine-tuned version of [SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) on the funsd-layoutlmv3 dataset. It achieves the following results on the evaluation set: - Loss: 1.8428 - Answer: {'precision': 0.047225501770956316, 'recall': 0.09791921664626684, 'f1': 0.06371963361210674, 'number': 817} - Header: {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 119} - Question: {'precision': 0.08554412560909583, 'recall': 0.2934076137418756, 'f1': 0.13246698805281912, 'number': 1077} - Overall Precision: 0.0730 - Overall Recall: 0.1967 - Overall F1: 0.1065 - Overall Accuracy: 0.2652 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
parambharat/whisper-small-ml
parambharat
2022-12-13T08:05:36Z
6
2
transformers
[ "transformers", "pytorch", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "ml", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-11T14:21:18Z
--- language: - ml license: apache-2.0 tags: - whisper-event - generated_from_trainer metrics: - wer model-index: - name: Whisper Small ML - Bharat Ramanathan results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: mozilla-foundation/common_voice_11_0 type: mozilla-foundation/common_voice_11_0 config: ml split: test metrics: - type: wer value: 25.8 name: WER - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: google/fleurs type: google/fleurs config: ml_in split: test metrics: - type: wer value: 48.16 name: WER --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small ML - Bharat Ramanathan This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2308 - Wer: 36.7397 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 3000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.1275 | 4.03 | 500 | 0.1630 | 35.4015 | | 0.09 | 9.02 | 1000 | 0.1821 | 40.0243 | | 0.062 | 14.01 | 1500 | 0.2004 | 37.7129 | | 0.0441 | 19.0 | 2000 | 0.2105 | 36.2530 | | 0.0335 | 23.03 | 2500 | 0.2250 | 37.7129 | | 0.0276 | 28.02 | 3000 | 0.2308 | 36.7397 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
weijiang2009/AlgmonQuestingAnsweringModel-finetune
weijiang2009
2022-12-13T07:59:31Z
5
1
transformers
[ "transformers", "pytorch", "en", "dataset:algmon_vertical_domian_0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-12-13T07:11:56Z
--- language: en widget: - text: "The scale, variety, and quantity of publicly-available NLP datasets has grown rapidly as researchers propose new tasks, larger models, and novel benchmarks. Datasets is a community library for contemporary NLP designed to support this ecosystem. Datasets aims to standardize end-user interfaces, versioning, and documentation, while providing a lightweight front-end that behaves similarly for small datasets as for internet-scale corpora. The design of the library incorporates a distributed, community-driven approach to adding datasets and documenting usage. After a year of development, the library now includes more than 650 unique datasets, has more than 250 contributors, and has helped support a variety of novel cross-dataset research projects and shared tasks. The library is available at https://github.com/huggingface/datasets." datasets: - algmon_vertical_domian_0 model-index: - name: weijiang2009/AlgmonQuestingAnsweringModel-finetune results: - task: type: question-answering name: Question Answering dataset: name: squad_v2 type: squad_v2 config: squad_v2 split: validation metrics: - type: exact_match value: 79.9309 name: Exact Match verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMDhhNjg5YzNiZGQ1YTIyYTAwZGUwOWEzZTRiYzdjM2QzYjA3ZTUxNDM1NjE1MTUyMjE1MGY1YzEzMjRjYzVjYiIsInZlcnNpb24iOjF9.EH5JJo8EEFwU7osPz3s7qanw_tigeCFhCXjSfyN0Y1nWVnSfulSxIk_DbAEI5iE80V4EKLyp5-mYFodWvL2KDA - type: f1 value: 82.9501 name: F1 verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMjk5ZDYwOGQyNjNkMWI0OTE4YzRmOTlkY2JjNjQ0YTZkNTMzMzNkYTA0MDFmNmI3NjA3NjNlMjhiMDQ2ZjJjNSIsInZlcnNpb24iOjF9.DDm0LNTkdLbGsue58bg1aH_s67KfbcmkvL-6ZiI2s8IoxhHJMSf29H_uV2YLyevwx900t-MwTVOW3qfFnMMEAQ - type: total value: 11869 name: total verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMGFkMmI2ODM0NmY5NGNkNmUxYWViOWYxZDNkY2EzYWFmOWI4N2VhYzY5MGEzMTVhOTU4Zjc4YWViOGNjOWJjMCIsInZlcnNpb24iOjF9.fexrU1icJK5_MiifBtZWkeUvpmFISqBLDXSQJ8E6UnrRof-7cU0s4tX_dIsauHWtUpIHMPZCf5dlMWQKXZuAAA --- # Algmon domain-specific fine-tune for a question-answering service * Domain-specific to Algmon verticals * Base model plus fine-tuning
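A minimal QA sketch for the card above; an extractive question-answering head is assumed from the SQuAD v2 metrics reported:
```python
from transformers import pipeline

qa = pipeline("question-answering", model="weijiang2009/AlgmonQuestingAnsweringModel-finetune")
result = qa(
    question="Where is the library available?",
    context=(
        "Datasets is a community library for contemporary NLP. "
        "The library is available at https://github.com/huggingface/datasets."
    ),
)
print(result["answer"], result["score"])
```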
thkkvui/xlm-roberta-base-finetuned-panx-en
thkkvui
2022-12-13T07:26:56Z
7
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-12-13T07:20:08Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-en results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.en metrics: - name: F1 type: f1 value: 0.6699779249448123 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-en This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.4004 - F1: 0.6700 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 1.1798 | 1.0 | 50 | 0.6616 | 0.4612 | | 0.5404 | 2.0 | 100 | 0.4206 | 0.6551 | | 0.3714 | 3.0 | 150 | 0.4004 | 0.6700 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.13.0.dev20220711 - Datasets 2.4.0 - Tokenizers 0.12.1
devemrekoc/sd-tunable
devemrekoc
2022-12-13T07:24:12Z
0
1
diffusers
[ "diffusers", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2022-12-12T08:30:59Z
--- license: creativeml-openrail-m --- Stable Diffusion v1-5 with the fine-tuned VAE `sd-vae-ft-mse` and config modifications that make it easier to fine-tune, prepared by [fast-stable-diffusion by TheLastBen](https://github.com/TheLastBen/fast-stable-diffusion) for use in the [fast-DreamBooth Colab notebook](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) and the [Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training). It is not suited for inference, and training it elsewhere is at your own risk. The [model LICENSE](https://huggingface.co/spaces/CompVis/stable-diffusion-license) still applies normally for this use case. Refer to the [original repository](https://huggingface.co/runwayml/stable-diffusion-v1-5) for the model card.
thkkvui/xlm-roberta-base-finetuned-panx-it
thkkvui
2022-12-13T07:19:41Z
7
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-12-13T07:11:40Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-it results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.it metrics: - name: F1 type: f1 value: 0.8168094655242758 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-it This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.2601 - F1: 0.8168 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.8182 | 1.0 | 70 | 0.3477 | 0.7319 | | 0.3068 | 2.0 | 140 | 0.2838 | 0.7765 | | 0.193 | 3.0 | 210 | 0.2601 | 0.8168 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.13.0.dev20220711 - Datasets 2.4.0 - Tokenizers 0.12.1
aparna1595/bert-mpnet-sentence
aparna1595
2022-12-13T07:04:45Z
1
0
sentence-transformers
[ "sentence-transformers", "pytorch", "feature-extraction", "sentence-similarity", "transformers", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
2022-12-13T06:05:10Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # aparna1595/bert-mpnet-sentence This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('aparna1595/bert-mpnet-sentence') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch def cls_pooling(model_output, attention_mask): return model_output[0][:,0] # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('aparna1595/bert-mpnet-sentence') model = AutoModel.from_pretrained('aparna1595/bert-mpnet-sentence') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, cls pooling. sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=aparna1595/bert-mpnet-sentence) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1 with parameters: ``` {'batch_size': 2, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 3, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
sryu1/ppo-LunarLander-v2
sryu1
2022-12-13T06:57:34Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T06:43:30Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 288.93 +/- 19.72 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the checkpoint filename is an assumption, so check the repo's file list for the exact name: ```python from huggingface_sb3 import load_from_hub from stable_baselines3 import PPO # The filename below is assumed, not confirmed by this card checkpoint = load_from_hub(repo_id="sryu1/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
ljh838/inpaint
ljh838
2022-12-13T06:32:54Z
0
0
null
[ "region:us" ]
null
2022-12-13T06:32:29Z
--- title: Paint by example emoji: 🔥 colorFrom: green colorTo: pink sdk: gradio sdk_version: 3.6 app_file: app.py pinned: false duplicated_from: akhaliq/paint-by-example --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
SteffenSeiffarth/whisper-calls-small
SteffenSeiffarth
2022-12-13T06:31:57Z
4
0
transformers
[ "transformers", "pytorch", "whisper", "automatic-speech-recognition", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-12T10:15:38Z
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- wer
model-index:
- name: whisper-calls-small
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# whisper-calls-small

This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. Just a test; probably not a very good model.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 4000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer    |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.076         | 4.03  | 1000 | 0.0481          | 4.1883 |
| 0.0068        | 8.06  | 2000 | 0.0049          | 0.6362 |
| 0.0011        | 12.1  | 3000 | 0.0012          | 0.0157 |
| 0.0005        | 16.13 | 4000 | 0.0006          | 0.0    |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.7.1
- Tokenizers 0.13.2
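The card includes no inference code; a minimal transcription sketch with the `transformers` pipeline, assuming a local 16 kHz audio file (the path is a placeholder):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="SteffenSeiffarth/whisper-calls-small")
print(asr("call_recording.wav")["text"])  # placeholder path to a local audio file
```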
mssongit/fndeberta-sentence
mssongit
2022-12-13T05:47:39Z
1
0
sentence-transformers
[ "sentence-transformers", "pytorch", "deberta-v2", "feature-extraction", "sentence-similarity", "transformers", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
2022-12-13T05:27:43Z
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---

# mssongit/fndeberta-sentence

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer

sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('mssongit/fndeberta-sentence')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - take the attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('mssongit/fndeberta-sentence')
model = AutoModel.from_pretrained('mssongit/fndeberta-sentence')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=mssongit/fndeberta-sentence)

## Training

The model was trained with the parameters:

**DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 365 with parameters:
```
{'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the fit()-Method:
```
{
    "epochs": 6,
    "evaluation_steps": 1000,
    "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {"lr": 2e-05},
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 219,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: DebertaV2Model
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
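The card mentions semantic search as a target task; a minimal semantic-search sketch with the `sentence-transformers` utilities (corpus and query strings are illustrative):

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('mssongit/fndeberta-sentence')
# Illustrative corpus and query; replace with your own documents.
corpus = model.encode(["This is an example sentence", "Each sentence is converted"], convert_to_tensor=True)
query = model.encode("an example sentence", convert_to_tensor=True)
print(util.semantic_search(query, corpus, top_k=1))
```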
alaaawad/ddpm-celebahq-finetuned-butterflies-2epochs
alaaawad
2022-12-13T05:01:21Z
1
0
diffusers
[ "diffusers", "pytorch", "unconditional-image-generation", "diffusion-models-class", "license:mit", "diffusers:DDPMPipeline", "region:us" ]
unconditional-image-generation
2022-12-13T05:00:12Z
---
license: mit
tags:
- pytorch
- diffusers
- unconditional-image-generation
- diffusion-models-class
---

# Example Fine-Tuned Model for Unit 2 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class)

Describe your model here

## Usage

```python
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained('alaaawad/ddpm-celebahq-finetuned-butterflies-2epochs')
image = pipeline().images[0]
image
```
Shubham09/LISA_Whisper_medium_en
Shubham09
2022-12-13T04:58:21Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-13T04:46:11Z
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: LISA_Whisper_medium_en
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# LISA_Whisper_medium_en

This model is a fine-tuned version of [openai/whisper-medium.en](https://huggingface.co/openai/whisper-medium.en) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 10
- training_steps: 10
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer     |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| No log        | 1.13  | 8    | 2.1066          | 44.2353 |

### Framework versions

- Transformers 4.24.0
- Pytorch 1.12.1
- Datasets 2.7.1
- Tokenizers 0.13.2
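A minimal transcription sketch with the processor/model classes, assuming the repository includes the Whisper processor files (otherwise load them from `openai/whisper-medium.en`); the audio path is a placeholder:

```python
import librosa
from transformers import WhisperProcessor, WhisperForConditionalGeneration

processor = WhisperProcessor.from_pretrained("Shubham09/LISA_Whisper_medium_en")
model = WhisperForConditionalGeneration.from_pretrained("Shubham09/LISA_Whisper_medium_en")

audio, _ = librosa.load("sample.wav", sr=16000)  # placeholder path; Whisper expects 16 kHz input
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
ids = model.generate(inputs.input_features)
print(processor.batch_decode(ids, skip_special_tokens=True))
```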
abhinig2001/ppo-LunarLander-v2
abhinig2001
2022-12-13T04:47:22Z
1
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-11T02:10:24Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 264.04 +/- 16.16
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the checkpoint filename is an assumption; check the repository's file list):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The filename below is an assumption based on the usual huggingface_sb3 naming.
checkpoint = load_from_hub(repo_id="abhinig2001/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
jonatasgrosman/whisper-small-pt-cv11-v5
jonatasgrosman
2022-12-13T04:38:49Z
6
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "pt", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-12T21:41:36Z
---
language:
- pt
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
model-index:
- name: Whisper Small Portuguese
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: mozilla-foundation/common_voice_11_0 pt
      type: mozilla-foundation/common_voice_11_0
      config: pt
      split: test
      args: pt
    metrics:
    - name: Wer
      type: wer
      value: 14.684129429892142
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper Small Portuguese

This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 pt dataset. It achieves the following results on the evaluation set:
- Loss: 0.3056
- Wer: 14.6841
- Cer: 5.8856

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- training_steps: 10000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer     | Cer    |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|
| 0.2817        | 0.92  | 500  | 0.3352          | 15.9476 | 6.3609 |
| 0.2245        | 1.84  | 1000 | 0.3047          | 15.0231 | 5.9326 |
| 0.1587        | 2.76  | 1500 | 0.2985          | 15.0847 | 5.9326 |
| 0.1181        | 3.68  | 2000 | 0.3056          | 14.6841 | 5.8856 |
| 0.0741        | 4.6   | 2500 | 0.3162          | 14.9923 | 5.9906 |
| 0.0438        | 5.52  | 3000 | 0.3466          | 15.4700 | 6.2255 |
| 0.0294        | 6.45  | 3500 | 0.3799          | 15.2234 | 6.1647 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.12.1+cu116
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
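A minimal transcription sketch with the `transformers` pipeline (the audio path is a placeholder for a Portuguese clip):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="jonatasgrosman/whisper-small-pt-cv11-v5")
print(asr("amostra.wav")["text"])  # placeholder path to a 16 kHz Portuguese audio file
```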
KoichiYasuoka/roberta-base-thai-syllable
KoichiYasuoka
2022-12-13T03:41:23Z
5
0
transformers
[ "transformers", "pytorch", "roberta", "fill-mask", "thai", "masked-lm", "wikipedia", "th", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-03-02T23:29:04Z
---
language:
- "th"
tags:
- "thai"
- "masked-lm"
- "wikipedia"
license: "apache-2.0"
pipeline_tag: "fill-mask"
mask_token: "<mask>"
widget:
- text: "แผนกนี้กำลัง<mask>กับความท้าทายใหม่"
---

# roberta-base-thai-syllable

## Model Description

This is a RoBERTa model pre-trained on Thai Wikipedia texts, derived from [wangchanberta-base-wiki-syllable](https://huggingface.co/airesearch/wangchanberta-base-wiki-syllable). Its character embeddings are modified so that the model can use BertTokenizerFast. You can fine-tune `roberta-base-thai-syllable` for downstream tasks such as [POS-tagging](https://huggingface.co/KoichiYasuoka/roberta-base-thai-syllable-upos), [dependency-parsing](https://huggingface.co/KoichiYasuoka/roberta-base-thai-syllable-ud-goeswith), and so on.

## How to Use

```py
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-base-thai-syllable")
model = AutoModelForMaskedLM.from_pretrained("KoichiYasuoka/roberta-base-thai-syllable")
```
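A minimal fill-mask sketch, reusing the widget example from the card:

```py
from transformers import pipeline

unmasker = pipeline("fill-mask", model="KoichiYasuoka/roberta-base-thai-syllable")
# Widget example from the card; <mask> is this model's mask token.
print(unmasker("แผนกนี้กำลัง<mask>กับความท้าทายใหม่"))
```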
aabayomi/ppo-LunarLander-v2
aabayomi
2022-12-13T03:41:11Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T02:25:02Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: ppo-v2
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 280.05 +/- 20.39
      name: mean_reward
      verified: false
---

# **ppo-v2** Agent playing **LunarLander-v2**

This is a trained model of a **ppo-v2** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the checkpoint filename is an assumption; check the repository's file list):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The filename below is an assumption based on the usual huggingface_sb3 naming.
checkpoint = load_from_hub(repo_id="aabayomi/ppo-LunarLander-v2", filename="ppo-v2.zip")
model = PPO.load(checkpoint)
```
KoichiYasuoka/roberta-base-thai-char
KoichiYasuoka
2022-12-13T03:39:37Z
12
0
transformers
[ "transformers", "pytorch", "roberta", "fill-mask", "thai", "masked-lm", "wikipedia", "th", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-03-02T23:29:04Z
---
language:
- "th"
tags:
- "thai"
- "masked-lm"
- "wikipedia"
license: "apache-2.0"
pipeline_tag: "fill-mask"
mask_token: "[MASK]"
---

# roberta-base-thai-char

## Model Description

This is a RoBERTa model pre-trained on Thai Wikipedia texts with character-wise embeddings to use BertTokenizerFast. You can fine-tune `roberta-base-thai-char` for downstream tasks such as [POS-tagging](https://huggingface.co/KoichiYasuoka/roberta-base-thai-char-upos), [dependency-parsing](https://huggingface.co/KoichiYasuoka/roberta-base-thai-char-ud-goeswith), and so on.

## How to Use

```py
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-base-thai-char")
model = AutoModelForMaskedLM.from_pretrained("KoichiYasuoka/roberta-base-thai-char")
```
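A minimal fill-mask sketch (the input sentence is left as a placeholder; any Thai sentence containing the `[MASK]` token works):

```py
from transformers import pipeline

unmasker = pipeline("fill-mask", model="KoichiYasuoka/roberta-base-thai-char")
# unmasker("... [MASK] ...")  # supply a Thai sentence with [MASK] in it
```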
thkkvui/xlm-roberta-base-finetuned-panx-fr
thkkvui
2022-12-13T03:19:01Z
8
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-12-13T03:03:54Z
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-fr
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: xtreme
      type: xtreme
      args: PAN-X.fr
    metrics:
    - name: F1
      type: f1
      value: 0.8387205387205388
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-fr

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set:
- Loss: 0.2761
- F1: 0.8387

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1     |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.6255        | 1.0   | 191  | 0.3532          | 0.7791 |
| 0.2717        | 2.0   | 382  | 0.2793          | 0.8255 |
| 0.1761        | 3.0   | 573  | 0.2761          | 0.8387 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.13.0.dev20220711
- Datasets 2.4.0
- Tokenizers 0.12.1
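A minimal tagging sketch with the `transformers` pipeline (the input sentence is illustrative):

```python
from transformers import pipeline

ner = pipeline("token-classification",
               model="thkkvui/xlm-roberta-base-finetuned-panx-fr",
               aggregation_strategy="simple")  # merge subword pieces into entity spans
print(ner("Emmanuel Macron est né à Amiens."))
```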
tablesalt/sd-class-butterflies-32
tablesalt
2022-12-13T03:18:39Z
2
0
diffusers
[ "diffusers", "pytorch", "unconditional-image-generation", "diffusion-models-class", "license:mit", "diffusers:DDPMPipeline", "region:us" ]
unconditional-image-generation
2022-12-13T03:16:57Z
---
license: mit
tags:
- pytorch
- diffusers
- unconditional-image-generation
- diffusion-models-class
---

# Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class)

This model is a diffusion model for unconditional image generation of cute 🦋.

## Usage

```python
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained('tablesalt/sd-class-butterflies-32')
image = pipeline().images[0]
image
```
abhinig2001/ppo-Huggy
abhinig2001
2022-12-13T02:52:58Z
1
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "unity-ml-agents", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
2022-12-13T02:52:51Z
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Step 1: Write your model_id: abhinig2001/ppo-Huggy
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
sergey-antonov/q-Taxi-v3
sergey-antonov
2022-12-13T02:42:52Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T02:42:43Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is a helper from the course notebook; see the sketch below.
model = load_from_hub(repo_id="sergey-antonov/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
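The usage snippet assumes a `load_from_hub` helper; a minimal sketch of one possible implementation, assuming the model was saved as a pickled dictionary:

```python
import pickle
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str):
    # Download the pickled model dictionary (Q-table, env_id, ...) from the Hub.
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)
```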
jonatasgrosman/whisper-small-pt-cv11-v4_2
jonatasgrosman
2022-12-13T02:32:00Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "pt", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-12T23:01:44Z
---
language:
- pt
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
model-index:
- name: Whisper Small Portuguese
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: mozilla-foundation/common_voice_11_0 pt
      type: mozilla-foundation/common_voice_11_0
      config: pt
      split: test
      args: pt
    metrics:
    - name: Wer
      type: wer
      value: 14.237288135593221
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper Small Portuguese

This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 pt dataset. It achieves the following results on the evaluation set:
- Loss: 0.3023
- Wer: 14.2373
- Cer: 5.5236

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- training_steps: 10000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer     | Cer    |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|
| 1.1113        | 0.92  | 500  | 0.3897          | 16.8721 | 6.7919 |
| 0.9009        | 1.84  | 1000 | 0.3318          | 15.9322 | 6.2310 |
| 0.7631        | 2.76  | 1500 | 0.3177          | 15.4854 | 5.8939 |
| 0.7163        | 3.68  | 2000 | 0.3130          | 14.8998 | 5.7972 |
| 0.6334        | 4.6   | 2500 | 0.3034          | 14.7920 | 5.6867 |
| 0.5746        | 5.52  | 3000 | 0.3029          | 14.6225 | 5.6397 |
| 0.5359        | 6.45  | 3500 | 0.3018          | 14.4838 | 5.5789 |
| 0.5058        | 7.37  | 4000 | 0.3010          | 14.5917 | 5.6839 |
| 0.4833        | 8.29  | 4500 | 0.3023          | 14.2373 | 5.5236 |
| 0.4398        | 9.21  | 5000 | 0.3005          | 14.4222 | 5.5844 |
| 0.4359        | 10.13 | 5500 | 0.2999          | 14.4838 | 5.6259 |
| 0.4036        | 11.05 | 6000 | 0.2995          | 14.2835 | 5.5623 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.12.1+cu116
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
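The WER figures above can be recomputed with the `evaluate` library; a minimal sketch (the strings are illustrative; a real evaluation uses the Common Voice test split):

```python
import evaluate

wer_metric = evaluate.load("wer")
# Word error rate between hypothesis and reference transcripts.
print(wer_metric.compute(predictions=["olá mundo"], references=["olá mundo"]))  # 0.0
```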
sergey-antonov/q-FrozenLake-v1-4x4-noSlippery
sergey-antonov
2022-12-13T01:55:12Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T01:39:33Z
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is a helper from the course notebook (see the rollout sketch below).
model = load_from_hub(repo_id="sergey-antonov/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
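Once loaded, the agent can act greedily on the Q-table; a minimal rollout sketch, assuming the course's `"qtable"` dictionary key and the classic (pre-0.26) gym step API:

```python
import numpy as np

state = env.reset()
done = False
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action from the Q-table
    state, reward, done, info = env.step(action)     # classic gym step signature
```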
jontang/ppo-LunarLander
jontang
2022-12-13T01:11:57Z
1
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-13T01:11:32Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 256.08 +/- 12.77
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the checkpoint filename is an assumption; check the repository's file list):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The filename below is an assumption based on the usual huggingface_sb3 naming.
checkpoint = load_from_hub(repo_id="jontang/ppo-LunarLander", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
hli/xlm-roberta-base-finetuned-panx-de
hli
2022-12-13T00:41:37Z
6
0
transformers
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-12-13T00:07:40Z
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: xtreme
      type: xtreme
      config: PAN-X.de
      split: train
      args: PAN-X.de
    metrics:
    - name: F1
      type: f1
      value: 0.8638300289723342
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-panx-de

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set:
- Loss: 0.1358
- F1: 0.8638

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | F1     |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2591        | 1.0   | 525  | 0.1621          | 0.8206 |
| 0.1276        | 2.0   | 1050 | 0.1379          | 0.8486 |
| 0.082         | 3.0   | 1575 | 0.1358          | 0.8638 |

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.7.1
- Tokenizers 0.13.2
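A minimal tagging sketch with the `transformers` pipeline (the input sentence is illustrative):

```python
from transformers import pipeline

ner = pipeline("token-classification",
               model="hli/xlm-roberta-base-finetuned-panx-de",
               aggregation_strategy="simple")  # merge subword pieces into entity spans
print(ner("Angela Merkel wurde in Hamburg geboren."))
```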
Isaacp/ppo-Huggy
Isaacp
2022-12-13T00:15:39Z
2
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "unity-ml-agents", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
2022-12-13T00:15:31Z
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Step 1: Write your model_id: Isaacp/ppo-Huggy
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
Squiggles112/autotrain-hamantest-2444675858
Squiggles112
2022-12-13T00:06:37Z
22
0
transformers
[ "transformers", "pytorch", "autotrain", "vision", "image-classification", "dataset:lehiko/autotrain-data-hamantest", "co2_eq_emissions", "endpoints_compatible", "region:us" ]
image-classification
2022-12-13T00:06:11Z
---
tags:
- autotrain
- vision
- image-classification
datasets:
- lehiko/autotrain-data-hamantest
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
  example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
  example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
  example_title: Palace
co2_eq_emissions:
  emissions: 0.36899783929655716
---

# Model Trained Using AutoTrain

- Problem type: Binary Classification
- Model ID: 2444675858
- CO2 Emissions (in grams): 0.3690

## Validation Metrics

- Loss: 0.634
- Accuracy: 0.800
- Precision: 0.000
- Recall: 0.000
- AUC: 0.000
- F1: 0.000
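A minimal classification sketch with the `transformers` pipeline, reusing one of the widget images:

```python
from transformers import pipeline

clf = pipeline("image-classification", model="Squiggles112/autotrain-hamantest-2444675858")
# Widget example image from the card; a local file path also works.
print(clf("https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"))
```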
Praboda/distilbert-base-uncased-finetuned-emotion
Praboda
2022-12-12T23:13:00Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-12-12T23:04:40Z
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: distilbert-base-uncased-finetuned-emotion
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-emotion

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set:
- Loss: 0.2232
- Accuracy: 0.9255
- F1: 0.9255

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1     |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.8156        | 1.0   | 250  | 0.3216          | 0.906    | 0.9035 |
| 0.2486        | 2.0   | 500  | 0.2232          | 0.9255   | 0.9255 |

### Framework versions

- Transformers 4.13.0
- Pytorch 1.13.0+cu116
- Datasets 1.16.1
- Tokenizers 0.10.3
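A minimal classification sketch with the `transformers` pipeline (the input sentence is illustrative):

```python
from transformers import pipeline

clf = pipeline("text-classification", model="Praboda/distilbert-base-uncased-finetuned-emotion")
print(clf("I am thrilled with how this turned out!"))  # illustrative input
```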
BramVanroy/bert-base-multilingual-cased-hebban-reviews5
BramVanroy
2022-12-12T23:07:55Z
7
0
transformers
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "sentiment-analysis", "dutch", "text", "nl", "dataset:BramVanroy/hebban-reviews", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-29T07:37:05Z
---
datasets:
- BramVanroy/hebban-reviews
language:
- nl
license: mit
metrics:
- accuracy
- f1
- precision
- qwk
- recall
model-index:
- name: bert-base-multilingual-cased-hebban-reviews5
  results:
  - dataset:
      config: filtered_rating
      name: BramVanroy/hebban-reviews - filtered_rating - 2.0.0
      revision: 2.0.0
      split: test
      type: BramVanroy/hebban-reviews
    metrics:
    - name: Test accuracy
      type: accuracy
      value: 0.5898668639053254
    - name: Test f1
      type: f1
      value: 0.5899204480029937
    - name: Test precision
      type: precision
      value: 0.5971431895675179
    - name: Test qwk
      type: qwk
      value: 0.7050840079198698
    - name: Test recall
      type: recall
      value: 0.5898668639053254
    task:
      name: sentiment analysis
      type: text-classification
tags:
- sentiment-analysis
- dutch
- text
widget:
- text: Wauw, wat een leuk boek! Ik heb me er goed mee vermaakt.
- text: Nee, deze vond ik niet goed. De auteur doet zijn best om je als lezer mee te trekken in het verhaal maar mij overtuigt het alleszins niet.
- text: Ik vind het niet slecht maar de schrijfstijl trekt me ook niet echt aan. Het wordt een beetje saai vanaf het vijfde hoofdstuk
---

# bert-base-multilingual-cased-hebban-reviews5

# Dataset

- dataset_name: BramVanroy/hebban-reviews
- dataset_config: filtered_rating
- dataset_revision: 2.0.0
- labelcolumn: review_rating0
- textcolumn: review_text_without_quotes

# Training

- optim: adamw_hf
- learning_rate: 5e-05
- per_device_train_batch_size: 64
- per_device_eval_batch_size: 64
- gradient_accumulation_steps: 1
- max_steps: 5001
- save_steps: 500
- metric_for_best_model: qwk

# Best checkpoint based on validation

- best_metric: 0.697825193570947
- best_model_checkpoint: trained/hebban-reviews5/bert-base-multilingual-cased/checkpoint-4500

# Test results of best checkpoint

- accuracy: 0.5898668639053254
- f1: 0.5899204480029937
- precision: 0.5971431895675179
- qwk: 0.7050840079198698
- recall: 0.5898668639053254

## Confusion matrix

![cfm](fig/test_confusion_matrix.png)

## Normalized confusion matrix

![norm cfm](fig/test_confusion_matrix_norm.png)

# Environment

- cuda_capabilities: 8.0; 8.0
- cuda_device_count: 2
- cuda_devices: NVIDIA A100-SXM4-80GB; NVIDIA A100-SXM4-80GB
- finetuner_commit: 8159b4c1d5e66b36f68dd263299927ffb8670ebd
- platform: Linux-4.18.0-305.49.1.el8_4.x86_64-x86_64-with-glibc2.28
- python_version: 3.9.5
- torch_version: 1.10.0
- transformers_version: 4.21.0
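A minimal sentiment-scoring sketch with the `transformers` pipeline, reusing one of the widget sentences:

```python
from transformers import pipeline

clf = pipeline("text-classification", model="BramVanroy/bert-base-multilingual-cased-hebban-reviews5")
# Widget example from the card ("Wow, what a fun book! I really enjoyed it.")
print(clf("Wauw, wat een leuk boek! Ik heb me er goed mee vermaakt."))
```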
numan966/ppo-LunarLander-v2
numan966
2022-12-12T23:06:02Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-10T19:57:16Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 283.09 +/- 15.42
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the checkpoint filename is an assumption; check the repository's file list):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The filename below is an assumption based on the usual huggingface_sb3 naming.
checkpoint = load_from_hub(repo_id="numan966/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
bjarlestam/ppo-Huggy
bjarlestam
2022-12-12T22:47:47Z
2
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "unity-ml-agents", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
2022-12-12T22:35:41Z
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/ThomasSimonini/Huggy
2. Step 1: Write your model_id: bjarlestam/ppo-Huggy
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
zhoppy/deep-rl-course-ppo-LunarLander-v2
zhoppy
2022-12-12T22:43:21Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-12T22:42:54Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 251.20 +/- 22.80
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the checkpoint filename is an assumption; check the repository's file list):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The filename below is an assumption based on the usual huggingface_sb3 naming.
checkpoint = load_from_hub(repo_id="zhoppy/deep-rl-course-ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
NeelK94/ppo-LunarLander-v2
NeelK94
2022-12-12T22:43:17Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-12T22:42:54Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 243.03 +/- 36.09
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the checkpoint filename is an assumption; check the repository's file list):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The filename below is an assumption based on the usual huggingface_sb3 naming.
checkpoint = load_from_hub(repo_id="NeelK94/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
adityakanthi123/training_simcse_100k_unsup
adityakanthi123
2022-12-12T22:35:09Z
1
0
sentence-transformers
[ "sentence-transformers", "pytorch", "roberta", "feature-extraction", "sentence-similarity", "transformers", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
sentence-similarity
2022-12-05T15:28:03Z
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer

sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Mean Pooling - take the attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

## Training

The model was trained with the parameters:

**DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1566 with parameters:
```
{'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```

Parameters of the fit()-Method:
```
{
    "epochs": 2,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {"lr": 2e-05},
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 314,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 64, 'do_lower_case': False}) with Transformer model: RobertaModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
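A minimal sketch turning the pooled embeddings above into a pairwise cosine-similarity matrix:

```python
import torch.nn.functional as F

# L2-normalized embeddings make dot products equal to cosine similarities.
normalized = F.normalize(sentence_embeddings, p=2, dim=1)
print(normalized @ normalized.T)  # pairwise cosine-similarity matrix
```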
HayLahav/ppo-LunarLander-v2
HayLahav
2022-12-12T22:24:32Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-12-11T23:52:05Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 266.36 +/- 15.49
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the checkpoint filename is an assumption; check the repository's file list):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The filename below is an assumption based on the usual huggingface_sb3 naming.
checkpoint = load_from_hub(repo_id="HayLahav/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
anuragshas/whisper-large-v2-pa-IN
anuragshas
2022-12-12T22:23:09Z
6
0
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "pa", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-12T20:53:26Z
---
language:
- pa
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
model-index:
- name: Whisper Large-v2 Punjabi
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: mozilla-foundation/common_voice_11_0 pa-IN
      type: mozilla-foundation/common_voice_11_0
      config: pa-IN
      split: test
      args: pa-IN
    metrics:
    - name: Wer
      type: wer
      value: 21.27310061601643
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper Large-v2 Punjabi

This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the mozilla-foundation/common_voice_11_0 pa-IN dataset. It achieves the following results on the evaluation set:
- Loss: 0.3382
- Wer: 21.2731

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 50
- training_steps: 500
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer     |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.0002        | 14.29 | 500  | 0.3382          | 21.2731 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
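A minimal transcription sketch with the `transformers` pipeline (the audio path is a placeholder for a Punjabi clip):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="anuragshas/whisper-large-v2-pa-IN")
print(asr("punjabi_sample.wav")["text"])  # placeholder path to a 16 kHz audio file
```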
DrishtiSharma/whisper-medium-serbian-v1
DrishtiSharma
2022-12-12T22:20:11Z
5
1
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "sr", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-12T18:07:33Z
---
language:
- sr
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
model-index:
- name: Whisper Medium Serbian - Drishti Sharma
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice 11.0
      type: mozilla-foundation/common_voice_11_0
      config: sr
      split: test
      args: sr
    metrics:
    - name: Wer
      type: wer
      value: 11.817078106029948
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper Medium Serbian - Drishti Sharma

This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set:
- Loss: 0.4127
- Wer: 11.8171

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- training_steps: 1500
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer     |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.0002        | 28.3  | 1500 | 0.4127          | 11.8171 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu116
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
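A minimal transcription sketch with the `transformers` pipeline (the audio path is a placeholder for a Serbian clip):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="DrishtiSharma/whisper-medium-serbian-v1")
print(asr("serbian_sample.wav")["text"])  # placeholder path to a 16 kHz audio file
```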